author    Myles Borins <mylesborins@google.com>    2018-04-10 21:39:51 -0400
committer Myles Borins <mylesborins@google.com>    2018-04-11 13:22:42 -0400
commit    12a1b9b8049462e47181a298120243dc83e81c55
tree      8605276308c8b4e3597516961266bae1af57557a
parent    78cd8263354705b767ef8c6a651740efe4931ba0
deps: update V8 to 6.6.346.23
PR-URL: https://github.com/nodejs/node/pull/19201
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Diffstat (limited to 'deps/v8/src')
-rw-r--r--deps/v8/src/PRESUBMIT.py2
-rw-r--r--deps/v8/src/allocation.cc7
-rw-r--r--deps/v8/src/api-arguments.cc7
-rw-r--r--deps/v8/src/api-arguments.h9
-rw-r--r--deps/v8/src/api-natives.cc15
-rw-r--r--deps/v8/src/api-natives.h4
-rw-r--r--deps/v8/src/api.cc550
-rw-r--r--deps/v8/src/api.h4
-rw-r--r--deps/v8/src/arm/assembler-arm-inl.h63
-rw-r--r--deps/v8/src/arm/assembler-arm.cc62
-rw-r--r--deps/v8/src/arm/assembler-arm.h98
-rw-r--r--deps/v8/src/arm/code-stubs-arm.cc16
-rw-r--r--deps/v8/src/arm/codegen-arm.cc10
-rw-r--r--deps/v8/src/arm/constants-arm.h4
-rw-r--r--deps/v8/src/arm/deoptimizer-arm.cc3
-rw-r--r--deps/v8/src/arm/disasm-arm.cc10
-rw-r--r--deps/v8/src/arm/frame-constants-arm.h6
-rw-r--r--deps/v8/src/arm/interface-descriptors-arm.cc6
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.cc165
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.h30
-rw-r--r--deps/v8/src/arm/simulator-arm.cc23
-rw-r--r--deps/v8/src/arm/simulator-arm.h4
-rw-r--r--deps/v8/src/arm64/assembler-arm64-inl.h40
-rw-r--r--deps/v8/src/arm64/assembler-arm64.cc20
-rw-r--r--deps/v8/src/arm64/assembler-arm64.h34
-rw-r--r--deps/v8/src/arm64/code-stubs-arm64.cc38
-rw-r--r--deps/v8/src/arm64/constants-arm64.h11
-rw-r--r--deps/v8/src/arm64/decoder-arm64-inl.h6
-rw-r--r--deps/v8/src/arm64/deoptimizer-arm64.cc7
-rw-r--r--deps/v8/src/arm64/disasm-arm64.cc20
-rw-r--r--deps/v8/src/arm64/disasm-arm64.h6
-rw-r--r--deps/v8/src/arm64/eh-frame-arm64.cc8
-rw-r--r--deps/v8/src/arm64/frame-constants-arm64.h6
-rw-r--r--deps/v8/src/arm64/instructions-arm64.h6
-rw-r--r--deps/v8/src/arm64/instrument-arm64.cc25
-rw-r--r--deps/v8/src/arm64/interface-descriptors-arm64.cc9
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64-inl.h137
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.cc388
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.h143
-rw-r--r--deps/v8/src/arm64/simulator-arm64.cc60
-rw-r--r--deps/v8/src/arm64/simulator-arm64.h1
-rw-r--r--deps/v8/src/asmjs/OWNERS2
-rw-r--r--deps/v8/src/asmjs/asm-js.cc44
-rw-r--r--deps/v8/src/asmjs/asm-parser.cc6
-rw-r--r--deps/v8/src/asmjs/switch-logic.h6
-rw-r--r--deps/v8/src/assembler.cc106
-rw-r--r--deps/v8/src/assembler.h107
-rw-r--r--deps/v8/src/ast/OWNERS1
-rw-r--r--deps/v8/src/ast/ast-function-literal-id-reindexer.h6
-rw-r--r--deps/v8/src/ast/ast-numbering.cc410
-rw-r--r--deps/v8/src/ast/ast-numbering.h55
-rw-r--r--deps/v8/src/ast/ast.cc6
-rw-r--r--deps/v8/src/ast/ast.h106
-rw-r--r--deps/v8/src/ast/compile-time-value.h6
-rw-r--r--deps/v8/src/ast/prettyprinter.cc24
-rw-r--r--deps/v8/src/ast/scopes.cc57
-rw-r--r--deps/v8/src/ast/scopes.h18
-rw-r--r--deps/v8/src/bailout-reason.h5
-rw-r--r--deps/v8/src/base.isolate2
-rw-r--r--deps/v8/src/base/atomic-utils.h6
-rw-r--r--deps/v8/src/base/atomicops_internals_portable.h4
-rw-r--r--deps/v8/src/base/atomicops_internals_std.h4
-rw-r--r--deps/v8/src/base/cpu.cc3
-rw-r--r--deps/v8/src/base/file-utils.h2
-rw-r--r--deps/v8/src/base/format-macros.h6
-rw-r--r--deps/v8/src/base/logging.cc38
-rw-r--r--deps/v8/src/base/logging.h12
-rw-r--r--deps/v8/src/base/macros.h2
-rw-r--r--deps/v8/src/base/optional.h8
-rw-r--r--deps/v8/src/base/platform/platform-cygwin.cc2
-rw-r--r--deps/v8/src/base/platform/platform-fuchsia.cc16
-rw-r--r--deps/v8/src/base/platform/platform-posix.cc21
-rw-r--r--deps/v8/src/base/platform/platform-win32.cc9
-rw-r--r--deps/v8/src/base/platform/time.cc370
-rw-r--r--deps/v8/src/base/platform/time.h138
-rw-r--r--deps/v8/src/base/qnx-math.h2
-rw-r--r--deps/v8/src/base/sys-info.cc6
-rw-r--r--deps/v8/src/base/template-utils.h6
-rw-r--r--deps/v8/src/base/v8-fallthrough.h21
-rw-r--r--deps/v8/src/bit-vector.h6
-rw-r--r--deps/v8/src/bootstrapper.cc402
-rw-r--r--deps/v8/src/bootstrapper.h2
-rw-r--r--deps/v8/src/builtins/arm/builtins-arm.cc129
-rw-r--r--deps/v8/src/builtins/arm64/builtins-arm64.cc167
-rw-r--r--deps/v8/src/builtins/builtins-api.cc16
-rw-r--r--deps/v8/src/builtins/builtins-arguments-gen.cc2
-rw-r--r--deps/v8/src/builtins/builtins-array-gen.cc1355
-rw-r--r--deps/v8/src/builtins/builtins-array-gen.h156
-rw-r--r--deps/v8/src/builtins/builtins-array.cc7
-rw-r--r--deps/v8/src/builtins/builtins-async-function-gen.cc96
-rw-r--r--deps/v8/src/builtins/builtins-async-gen.cc211
-rw-r--r--deps/v8/src/builtins/builtins-async-gen.h49
-rw-r--r--deps/v8/src/builtins/builtins-async-generator-gen.cc223
-rw-r--r--deps/v8/src/builtins/builtins-async-iterator-gen.cc39
-rw-r--r--deps/v8/src/builtins/builtins-bigint.cc61
-rw-r--r--deps/v8/src/builtins/builtins-call-gen.cc10
-rw-r--r--deps/v8/src/builtins/builtins-collections-gen.cc405
-rw-r--r--deps/v8/src/builtins/builtins-constructor-gen.cc135
-rw-r--r--deps/v8/src/builtins/builtins-constructor-gen.h2
-rw-r--r--deps/v8/src/builtins/builtins-conversion-gen.cc19
-rw-r--r--deps/v8/src/builtins/builtins-dataview.cc88
-rw-r--r--deps/v8/src/builtins/builtins-date-gen.cc16
-rw-r--r--deps/v8/src/builtins/builtins-definitions.h150
-rw-r--r--deps/v8/src/builtins/builtins-function.cc20
-rw-r--r--deps/v8/src/builtins/builtins-generator-gen.cc10
-rw-r--r--deps/v8/src/builtins/builtins-internal-gen.cc359
-rw-r--r--deps/v8/src/builtins/builtins-intl.h2
-rw-r--r--deps/v8/src/builtins/builtins-iterator-gen.cc23
-rw-r--r--deps/v8/src/builtins/builtins-iterator-gen.h6
-rw-r--r--deps/v8/src/builtins/builtins-math-gen.cc2
-rw-r--r--deps/v8/src/builtins/builtins-number-gen.cc14
-rw-r--r--deps/v8/src/builtins/builtins-object-gen.cc332
-rw-r--r--deps/v8/src/builtins/builtins-object.cc25
-rw-r--r--deps/v8/src/builtins/builtins-promise-gen.cc1755
-rw-r--r--deps/v8/src/builtins/builtins-promise-gen.h81
-rw-r--r--deps/v8/src/builtins/builtins-proxy-gen.cc63
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.cc371
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.h20
-rw-r--r--deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc79
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.cc328
-rw-r--r--deps/v8/src/builtins/builtins-typedarray-gen.cc1278
-rw-r--r--deps/v8/src/builtins/builtins-typedarray-gen.h133
-rw-r--r--deps/v8/src/builtins/builtins-typedarray.cc70
-rw-r--r--deps/v8/src/builtins/builtins.cc1138
-rw-r--r--deps/v8/src/builtins/builtins.h22
-rw-r--r--deps/v8/src/builtins/constants-table-builder.cc83
-rw-r--r--deps/v8/src/builtins/constants-table-builder.h48
-rw-r--r--deps/v8/src/builtins/growable-fixed-array-gen.cc100
-rw-r--r--deps/v8/src/builtins/growable-fixed-array-gen.h56
-rw-r--r--deps/v8/src/builtins/ia32/builtins-ia32.cc107
-rw-r--r--deps/v8/src/builtins/mips/OWNERS1
-rw-r--r--deps/v8/src/builtins/mips/builtins-mips.cc121
-rw-r--r--deps/v8/src/builtins/mips64/OWNERS1
-rw-r--r--deps/v8/src/builtins/mips64/builtins-mips64.cc128
-rw-r--r--deps/v8/src/builtins/ppc/builtins-ppc.cc136
-rw-r--r--deps/v8/src/builtins/s390/builtins-s390.cc134
-rw-r--r--deps/v8/src/builtins/setup-builtins-internal.cc4
-rw-r--r--deps/v8/src/builtins/x64/builtins-x64.cc115
-rw-r--r--deps/v8/src/code-events.h21
-rw-r--r--deps/v8/src/code-stub-assembler.cc1579
-rw-r--r--deps/v8/src/code-stub-assembler.h337
-rw-r--r--deps/v8/src/code-stubs.cc46
-rw-r--r--deps/v8/src/code-stubs.h4
-rw-r--r--deps/v8/src/compilation-cache.cc85
-rw-r--r--deps/v8/src/compilation-cache.h49
-rw-r--r--deps/v8/src/compilation-dependencies.h6
-rw-r--r--deps/v8/src/compilation-info.cc28
-rw-r--r--deps/v8/src/compilation-info.h87
-rw-r--r--deps/v8/src/compilation-statistics.h2
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc2
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher.h2
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc6
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h2
-rw-r--r--deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc2
-rw-r--r--deps/v8/src/compiler.cc622
-rw-r--r--deps/v8/src/compiler.h99
-rw-r--r--deps/v8/src/compiler/OWNERS4
-rw-r--r--deps/v8/src/compiler/access-builder.cc16
-rw-r--r--deps/v8/src/compiler/access-builder.h8
-rw-r--r--deps/v8/src/compiler/access-info.cc18
-rw-r--r--deps/v8/src/compiler/access-info.h3
-rw-r--r--deps/v8/src/compiler/arm/code-generator-arm.cc878
-rw-r--r--deps/v8/src/compiler/arm/instruction-codes-arm.h1
-rw-r--r--deps/v8/src/compiler/arm/instruction-scheduler-arm.cc1
-rw-r--r--deps/v8/src/compiler/arm/instruction-selector-arm.cc267
-rw-r--r--deps/v8/src/compiler/arm/unwinding-info-writer-arm.h6
-rw-r--r--deps/v8/src/compiler/arm64/code-generator-arm64.cc522
-rw-r--r--deps/v8/src/compiler/arm64/instruction-codes-arm64.h3
-rw-r--r--deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc3
-rw-r--r--deps/v8/src/compiler/arm64/instruction-selector-arm64.cc327
-rw-r--r--deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.h6
-rw-r--r--deps/v8/src/compiler/basic-block-instrumentor.h2
-rw-r--r--deps/v8/src/compiler/branch-elimination.cc287
-rw-r--r--deps/v8/src/compiler/branch-elimination.h75
-rw-r--r--deps/v8/src/compiler/bytecode-analysis.cc334
-rw-r--r--deps/v8/src/compiler/bytecode-analysis.h50
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.cc308
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.h26
-rw-r--r--deps/v8/src/compiler/c-linkage.cc3
-rw-r--r--deps/v8/src/compiler/code-assembler.cc105
-rw-r--r--deps/v8/src/compiler/code-assembler.h61
-rw-r--r--deps/v8/src/compiler/code-generator-impl.h7
-rw-r--r--deps/v8/src/compiler/code-generator.cc228
-rw-r--r--deps/v8/src/compiler/code-generator.h57
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.cc56
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.h4
-rw-r--r--deps/v8/src/compiler/common-operator.cc262
-rw-r--r--deps/v8/src/compiler/common-operator.h64
-rw-r--r--deps/v8/src/compiler/dead-code-elimination.cc10
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.cc369
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.h7
-rw-r--r--deps/v8/src/compiler/escape-analysis-reducer.cc5
-rw-r--r--deps/v8/src/compiler/frame-states.cc67
-rw-r--r--deps/v8/src/compiler/frame-states.h14
-rw-r--r--deps/v8/src/compiler/functional-list.h122
-rw-r--r--deps/v8/src/compiler/graph-assembler.cc4
-rw-r--r--deps/v8/src/compiler/graph-assembler.h7
-rw-r--r--deps/v8/src/compiler/graph-reducer.h2
-rw-r--r--deps/v8/src/compiler/ia32/code-generator-ia32.cc855
-rw-r--r--deps/v8/src/compiler/ia32/instruction-codes-ia32.h12
-rw-r--r--deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc10
-rw-r--r--deps/v8/src/compiler/ia32/instruction-selector-ia32.cc315
-rw-r--r--deps/v8/src/compiler/instruction-codes.h100
-rw-r--r--deps/v8/src/compiler/instruction-scheduler.cc89
-rw-r--r--deps/v8/src/compiler/instruction-selector-impl.h86
-rw-r--r--deps/v8/src/compiler/instruction-selector.cc244
-rw-r--r--deps/v8/src/compiler/instruction-selector.h28
-rw-r--r--deps/v8/src/compiler/instruction.cc24
-rw-r--r--deps/v8/src/compiler/instruction.h12
-rw-r--r--deps/v8/src/compiler/int64-lowering.cc34
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.cc796
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.h40
-rw-r--r--deps/v8/src/compiler/js-call-reducer.cc2144
-rw-r--r--deps/v8/src/compiler/js-call-reducer.h70
-rw-r--r--deps/v8/src/compiler/js-create-lowering.cc176
-rw-r--r--deps/v8/src/compiler/js-create-lowering.h2
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.cc76
-rw-r--r--deps/v8/src/compiler/js-graph.h2
-rw-r--r--deps/v8/src/compiler/js-inlining.cc12
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.cc84
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.h10
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.cc187
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.h6
-rw-r--r--deps/v8/src/compiler/js-operator.cc103
-rw-r--r--deps/v8/src/compiler/js-operator.h40
-rw-r--r--deps/v8/src/compiler/js-type-hint-lowering.cc38
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc71
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.h3
-rw-r--r--deps/v8/src/compiler/jump-threading.cc94
-rw-r--r--deps/v8/src/compiler/jump-threading.h2
-rw-r--r--deps/v8/src/compiler/linkage.cc6
-rw-r--r--deps/v8/src/compiler/linkage.h5
-rw-r--r--deps/v8/src/compiler/live-range-separator.h6
-rw-r--r--deps/v8/src/compiler/load-elimination.cc10
-rw-r--r--deps/v8/src/compiler/loop-variable-optimizer.cc132
-rw-r--r--deps/v8/src/compiler/loop-variable-optimizer.h19
-rw-r--r--deps/v8/src/compiler/machine-graph-verifier.cc53
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.cc4
-rw-r--r--deps/v8/src/compiler/machine-operator.cc186
-rw-r--r--deps/v8/src/compiler/machine-operator.h30
-rw-r--r--deps/v8/src/compiler/memory-optimizer.cc32
-rw-r--r--deps/v8/src/compiler/memory-optimizer.h3
-rw-r--r--deps/v8/src/compiler/mips/OWNERS1
-rw-r--r--deps/v8/src/compiler/mips/code-generator-mips.cc332
-rw-r--r--deps/v8/src/compiler/mips/instruction-selector-mips.cc244
-rw-r--r--deps/v8/src/compiler/mips64/OWNERS3
-rw-r--r--deps/v8/src/compiler/mips64/code-generator-mips64.cc404
-rw-r--r--deps/v8/src/compiler/mips64/instruction-codes-mips64.h6
-rw-r--r--deps/v8/src/compiler/mips64/instruction-selector-mips64.cc261
-rw-r--r--deps/v8/src/compiler/move-optimizer.h6
-rw-r--r--deps/v8/src/compiler/node-aux-data.h18
-rw-r--r--deps/v8/src/compiler/node-properties.cc21
-rw-r--r--deps/v8/src/compiler/node-properties.h2
-rw-r--r--deps/v8/src/compiler/node.cc4
-rw-r--r--deps/v8/src/compiler/node.h12
-rw-r--r--deps/v8/src/compiler/opcodes.h181
-rw-r--r--deps/v8/src/compiler/operation-typer.cc2
-rw-r--r--deps/v8/src/compiler/operator-properties.cc3
-rw-r--r--deps/v8/src/compiler/persistent-map.h57
-rw-r--r--deps/v8/src/compiler/pipeline-statistics.h2
-rw-r--r--deps/v8/src/compiler/pipeline.cc103
-rw-r--r--deps/v8/src/compiler/pipeline.h2
-rw-r--r--deps/v8/src/compiler/ppc/code-generator-ppc.cc194
-rw-r--r--deps/v8/src/compiler/ppc/instruction-selector-ppc.cc264
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.cc84
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.h20
-rw-r--r--deps/v8/src/compiler/register-allocator-verifier.h6
-rw-r--r--deps/v8/src/compiler/register-allocator.cc6
-rw-r--r--deps/v8/src/compiler/register-allocator.h6
-rw-r--r--deps/v8/src/compiler/representation-change.cc16
-rw-r--r--deps/v8/src/compiler/representation-change.h24
-rw-r--r--deps/v8/src/compiler/s390/code-generator-s390.cc191
-rw-r--r--deps/v8/src/compiler/s390/instruction-codes-s390.h8
-rw-r--r--deps/v8/src/compiler/s390/instruction-scheduler-s390.cc8
-rw-r--r--deps/v8/src/compiler/s390/instruction-selector-s390.cc298
-rw-r--r--deps/v8/src/compiler/simd-scalar-lowering.cc130
-rw-r--r--deps/v8/src/compiler/simd-scalar-lowering.h5
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc149
-rw-r--r--deps/v8/src/compiler/simplified-operator.cc181
-rw-r--r--deps/v8/src/compiler/simplified-operator.h36
-rw-r--r--deps/v8/src/compiler/store-store-elimination.cc11
-rw-r--r--deps/v8/src/compiler/type-cache.h6
-rw-r--r--deps/v8/src/compiler/typer.cc52
-rw-r--r--deps/v8/src/compiler/types.cc30
-rw-r--r--deps/v8/src/compiler/unwinding-info-writer.h2
-rw-r--r--deps/v8/src/compiler/verifier.cc139
-rw-r--r--deps/v8/src/compiler/wasm-compiler.cc984
-rw-r--r--deps/v8/src/compiler/wasm-compiler.h56
-rw-r--r--deps/v8/src/compiler/wasm-linkage.cc68
-rw-r--r--deps/v8/src/compiler/x64/code-generator-x64.cc650
-rw-r--r--deps/v8/src/compiler/x64/instruction-codes-x64.h5
-rw-r--r--deps/v8/src/compiler/x64/instruction-scheduler-x64.cc3
-rw-r--r--deps/v8/src/compiler/x64/instruction-selector-x64.cc255
-rw-r--r--deps/v8/src/compiler/x64/unwinding-info-writer-x64.h6
-rw-r--r--deps/v8/src/contexts.h136
-rw-r--r--deps/v8/src/conversions-inl.h15
-rw-r--r--deps/v8/src/conversions.cc44
-rw-r--r--deps/v8/src/conversions.h3
-rw-r--r--deps/v8/src/counters.cc16
-rw-r--r--deps/v8/src/counters.h59
-rw-r--r--deps/v8/src/d8-posix.cc4
-rw-r--r--deps/v8/src/d8.cc109
-rw-r--r--deps/v8/src/d8.gyp161
-rw-r--r--deps/v8/src/d8.h8
-rw-r--r--deps/v8/src/date.h4
-rw-r--r--deps/v8/src/debug/arm64/debug-arm64.cc3
-rw-r--r--deps/v8/src/debug/debug-evaluate.cc173
-rw-r--r--deps/v8/src/debug/debug-frames.cc5
-rw-r--r--deps/v8/src/debug/debug-interface.h6
-rw-r--r--deps/v8/src/debug/debug-scope-iterator.cc3
-rw-r--r--deps/v8/src/debug/debug-scopes.cc9
-rw-r--r--deps/v8/src/debug/debug-type-profile.cc8
-rw-r--r--deps/v8/src/debug/debug.cc387
-rw-r--r--deps/v8/src/debug/debug.h81
-rw-r--r--deps/v8/src/debug/debug.js633
-rw-r--r--deps/v8/src/debug/interface-types.h30
-rw-r--r--deps/v8/src/debug/liveedit.cc9
-rw-r--r--deps/v8/src/debug/liveedit.h2
-rw-r--r--deps/v8/src/debug/mips/OWNERS1
-rw-r--r--deps/v8/src/debug/mips64/OWNERS1
-rw-r--r--deps/v8/src/debug/mirrors.js12
-rw-r--r--deps/v8/src/deoptimize-reason.h4
-rw-r--r--deps/v8/src/deoptimizer.cc31
-rw-r--r--deps/v8/src/disasm.h8
-rw-r--r--deps/v8/src/disassembler.cc6
-rw-r--r--deps/v8/src/eh-frame.h2
-rw-r--r--deps/v8/src/elements-kind.cc88
-rw-r--r--deps/v8/src/elements-kind.h8
-rw-r--r--deps/v8/src/elements.cc336
-rw-r--r--deps/v8/src/elements.h15
-rw-r--r--deps/v8/src/execution.h3
-rw-r--r--deps/v8/src/external-reference-table.cc2
-rw-r--r--deps/v8/src/factory.cc213
-rw-r--r--deps/v8/src/factory.h50
-rw-r--r--deps/v8/src/fast-dtoa.cc30
-rw-r--r--deps/v8/src/feedback-vector.cc729
-rw-r--r--deps/v8/src/feedback-vector.h460
-rw-r--r--deps/v8/src/field-index-inl.h2
-rw-r--r--deps/v8/src/field-index.h2
-rw-r--r--deps/v8/src/field-type.h1
-rw-r--r--deps/v8/src/flag-definitions.h72
-rw-r--r--deps/v8/src/frames.cc177
-rw-r--r--deps/v8/src/frames.h32
-rw-r--r--deps/v8/src/gdb-jit.h2
-rw-r--r--deps/v8/src/global-handles.cc78
-rw-r--r--deps/v8/src/global-handles.h6
-rw-r--r--deps/v8/src/globals.h241
-rw-r--r--deps/v8/src/handler-table.cc220
-rw-r--r--deps/v8/src/handler-table.h135
-rw-r--r--deps/v8/src/heap-symbols.h487
-rw-r--r--deps/v8/src/heap/concurrent-marking.cc107
-rw-r--r--deps/v8/src/heap/concurrent-marking.h77
-rw-r--r--deps/v8/src/heap/heap-inl.h2
-rw-r--r--deps/v8/src/heap/heap.cc323
-rw-r--r--deps/v8/src/heap/heap.h78
-rw-r--r--deps/v8/src/heap/incremental-marking.cc24
-rw-r--r--deps/v8/src/heap/invalidated-slots-inl.h6
-rw-r--r--deps/v8/src/heap/invalidated-slots.h6
-rw-r--r--deps/v8/src/heap/item-parallel-job.cc130
-rw-r--r--deps/v8/src/heap/item-parallel-job.h134
-rw-r--r--deps/v8/src/heap/mark-compact.cc160
-rw-r--r--deps/v8/src/heap/mark-compact.h5
-rw-r--r--deps/v8/src/heap/marking.h6
-rw-r--r--deps/v8/src/heap/memory-reducer.cc1
-rw-r--r--deps/v8/src/heap/memory-reducer.h6
-rw-r--r--deps/v8/src/heap/object-stats.cc878
-rw-r--r--deps/v8/src/heap/object-stats.h88
-rw-r--r--deps/v8/src/heap/objects-visiting-inl.h6
-rw-r--r--deps/v8/src/heap/objects-visiting.h7
-rw-r--r--deps/v8/src/heap/remembered-set.h9
-rw-r--r--deps/v8/src/heap/scavenge-job.h2
-rw-r--r--deps/v8/src/heap/scavenger-inl.h9
-rw-r--r--deps/v8/src/heap/scavenger.cc9
-rw-r--r--deps/v8/src/heap/scavenger.h5
-rw-r--r--deps/v8/src/heap/setup-heap-internal.cc32
-rw-r--r--deps/v8/src/heap/slot-set.h6
-rw-r--r--deps/v8/src/heap/spaces-inl.h32
-rw-r--r--deps/v8/src/heap/spaces.cc190
-rw-r--r--deps/v8/src/heap/spaces.h95
-rw-r--r--deps/v8/src/heap/store-buffer.h6
-rw-r--r--deps/v8/src/heap/stress-marking-observer.h2
-rw-r--r--deps/v8/src/heap/stress-scavenge-observer.h2
-rw-r--r--deps/v8/src/heap/sweeper.cc8
-rw-r--r--deps/v8/src/heap/worklist.h6
-rw-r--r--deps/v8/src/ia32/assembler-ia32-inl.h40
-rw-r--r--deps/v8/src/ia32/assembler-ia32.cc550
-rw-r--r--deps/v8/src/ia32/assembler-ia32.h635
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.cc1
-rw-r--r--deps/v8/src/ia32/codegen-ia32.cc8
-rw-r--r--deps/v8/src/ia32/disasm-ia32.cc5
-rw-r--r--deps/v8/src/ia32/frame-constants-ia32.h6
-rw-r--r--deps/v8/src/ia32/interface-descriptors-ia32.cc7
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.cc175
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.h78
-rw-r--r--deps/v8/src/ia32/sse-instr.h6
-rw-r--r--deps/v8/src/ic/accessor-assembler.cc95
-rw-r--r--deps/v8/src/ic/accessor-assembler.h6
-rw-r--r--deps/v8/src/ic/binary-op-assembler.h6
-rw-r--r--deps/v8/src/ic/ic-inl.h6
-rw-r--r--deps/v8/src/ic/ic.cc206
-rw-r--r--deps/v8/src/ic/ic.h66
-rw-r--r--deps/v8/src/ic/keyed-store-generic.cc30
-rw-r--r--deps/v8/src/ic/keyed-store-generic.h6
-rw-r--r--deps/v8/src/ic/stub-cache.h6
-rw-r--r--deps/v8/src/identity-map.cc13
-rw-r--r--deps/v8/src/identity-map.h11
-rw-r--r--deps/v8/src/inspector/DEPS1
-rw-r--r--deps/v8/src/inspector/injected-script.cc8
-rw-r--r--deps/v8/src/inspector/injected-script.h6
-rw-r--r--deps/v8/src/inspector/inspected-context.h6
-rw-r--r--deps/v8/src/inspector/inspector.gyp88
-rw-r--r--deps/v8/src/inspector/inspector.gypi90
-rw-r--r--deps/v8/src/inspector/remote-object-id.h6
-rw-r--r--deps/v8/src/inspector/search-util.h6
-rw-r--r--deps/v8/src/inspector/string-16.cc13
-rw-r--r--deps/v8/src/inspector/string-16.h6
-rw-r--r--deps/v8/src/inspector/string-util.h6
-rw-r--r--deps/v8/src/inspector/test-interface.h2
-rw-r--r--deps/v8/src/inspector/v8-console-agent-impl.h6
-rw-r--r--deps/v8/src/inspector/v8-console-message.cc7
-rw-r--r--deps/v8/src/inspector/v8-console-message.h6
-rw-r--r--deps/v8/src/inspector/v8-console.h6
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.cc24
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.h10
-rw-r--r--deps/v8/src/inspector/v8-debugger-script.cc18
-rw-r--r--deps/v8/src/inspector/v8-debugger-script.h6
-rw-r--r--deps/v8/src/inspector/v8-debugger.cc1
-rw-r--r--deps/v8/src/inspector/v8-debugger.h7
-rw-r--r--deps/v8/src/inspector/v8-function-call.h6
-rw-r--r--deps/v8/src/inspector/v8-heap-profiler-agent-impl.h6
-rw-r--r--deps/v8/src/inspector/v8-injected-script-host.h6
-rw-r--r--deps/v8/src/inspector/v8-inspector-impl.h6
-rw-r--r--deps/v8/src/inspector/v8-inspector-session-impl.h6
-rw-r--r--deps/v8/src/inspector/v8-internal-value-type.h6
-rw-r--r--deps/v8/src/inspector/v8-profiler-agent-impl.h6
-rw-r--r--deps/v8/src/inspector/v8-regex.h6
-rw-r--r--deps/v8/src/inspector/v8-runtime-agent-impl.cc9
-rw-r--r--deps/v8/src/inspector/v8-runtime-agent-impl.h6
-rw-r--r--deps/v8/src/inspector/v8-schema-agent-impl.h6
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.h6
-rw-r--r--deps/v8/src/inspector/v8-value-utils.h6
-rw-r--r--deps/v8/src/inspector/wasm-translation.h6
-rw-r--r--deps/v8/src/instruction-stream.cc66
-rw-r--r--deps/v8/src/instruction-stream.h48
-rw-r--r--deps/v8/src/interface-descriptors.cc15
-rw-r--r--deps/v8/src/interface-descriptors.h29
-rw-r--r--deps/v8/src/interpreter/bytecode-array-accessor.cc24
-rw-r--r--deps/v8/src/interpreter/bytecode-array-accessor.h3
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.cc21
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.h9
-rw-r--r--deps/v8/src/interpreter/bytecode-array-writer.cc3
-rw-r--r--deps/v8/src/interpreter/bytecode-array-writer.h2
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc234
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h28
-rw-r--r--deps/v8/src/interpreter/bytecodes.cc11
-rw-r--r--deps/v8/src/interpreter/bytecodes.h34
-rw-r--r--deps/v8/src/interpreter/control-flow-builders.cc24
-rw-r--r--deps/v8/src/interpreter/control-flow-builders.h13
-rw-r--r--deps/v8/src/interpreter/handler-table-builder.cc18
-rw-r--r--deps/v8/src/interpreter/handler-table-builder.h2
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.cc339
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.h177
-rw-r--r--deps/v8/src/interpreter/interpreter-generator.cc514
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics-generator.cc398
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics-generator.h12
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics.h10
-rw-r--r--deps/v8/src/interpreter/interpreter.cc30
-rw-r--r--deps/v8/src/interpreter/interpreter.h17
-rw-r--r--deps/v8/src/isolate-inl.h2
-rw-r--r--deps/v8/src/isolate.cc593
-rw-r--r--deps/v8/src/isolate.h148
-rw-r--r--deps/v8/src/js/OWNERS1
-rw-r--r--deps/v8/src/js/array.js75
-rw-r--r--deps/v8/src/js/prologue.js13
-rw-r--r--deps/v8/src/js/typedarray.js291
-rw-r--r--deps/v8/src/json-parser.cc43
-rw-r--r--deps/v8/src/json-parser.h9
-rw-r--r--deps/v8/src/libplatform/tracing/trace-buffer.h6
-rw-r--r--deps/v8/src/libplatform/tracing/trace-writer.h6
-rw-r--r--deps/v8/src/libsampler/sampler.cc97
-rw-r--r--deps/v8/src/locked-queue-inl.h6
-rw-r--r--deps/v8/src/locked-queue.h6
-rw-r--r--deps/v8/src/log.cc211
-rw-r--r--deps/v8/src/log.h25
-rw-r--r--deps/v8/src/lookup.cc129
-rw-r--r--deps/v8/src/lookup.h23
-rw-r--r--deps/v8/src/messages.h13
-rw-r--r--deps/v8/src/mips/OWNERS1
-rw-r--r--deps/v8/src/mips/assembler-mips-inl.h22
-rw-r--r--deps/v8/src/mips/assembler-mips.cc42
-rw-r--r--deps/v8/src/mips/assembler-mips.h24
-rw-r--r--deps/v8/src/mips/codegen-mips.cc8
-rw-r--r--deps/v8/src/mips/constants-mips.h6
-rw-r--r--deps/v8/src/mips/cpu-mips.cc4
-rw-r--r--deps/v8/src/mips/disasm-mips.cc1
-rw-r--r--deps/v8/src/mips/frame-constants-mips.h6
-rw-r--r--deps/v8/src/mips/interface-descriptors-mips.cc6
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.cc150
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.h24
-rw-r--r--deps/v8/src/mips/simulator-mips.cc16
-rw-r--r--deps/v8/src/mips/simulator-mips.h17
-rw-r--r--deps/v8/src/mips64/OWNERS3
-rw-r--r--deps/v8/src/mips64/assembler-mips64-inl.h29
-rw-r--r--deps/v8/src/mips64/assembler-mips64.cc40
-rw-r--r--deps/v8/src/mips64/assembler-mips64.h29
-rw-r--r--deps/v8/src/mips64/code-stubs-mips64.h6
-rw-r--r--deps/v8/src/mips64/codegen-mips64.cc8
-rw-r--r--deps/v8/src/mips64/constants-mips64.h6
-rw-r--r--deps/v8/src/mips64/cpu-mips64.cc4
-rw-r--r--deps/v8/src/mips64/disasm-mips64.cc1
-rw-r--r--deps/v8/src/mips64/frame-constants-mips64.h6
-rw-r--r--deps/v8/src/mips64/interface-descriptors-mips64.cc6
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.cc212
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.h32
-rw-r--r--deps/v8/src/mips64/simulator-mips64.cc16
-rw-r--r--deps/v8/src/mips64/simulator-mips64.h23
-rw-r--r--deps/v8/src/objects-body-descriptors-inl.h9
-rw-r--r--deps/v8/src/objects-debug.cc189
-rw-r--r--deps/v8/src/objects-inl.h145
-rw-r--r--deps/v8/src/objects-printer.cc453
-rw-r--r--deps/v8/src/objects.cc977
-rw-r--r--deps/v8/src/objects.h375
-rw-r--r--deps/v8/src/objects/bigint.cc108
-rw-r--r--deps/v8/src/objects/bigint.h12
-rw-r--r--deps/v8/src/objects/code-inl.h77
-rw-r--r--deps/v8/src/objects/code.h134
-rw-r--r--deps/v8/src/objects/compilation-cache.h31
-rw-r--r--deps/v8/src/objects/data-handler-inl.h6
-rw-r--r--deps/v8/src/objects/data-handler.h6
-rw-r--r--deps/v8/src/objects/debug-objects-inl.h2
-rw-r--r--deps/v8/src/objects/debug-objects.cc159
-rw-r--r--deps/v8/src/objects/debug-objects.h37
-rw-r--r--deps/v8/src/objects/dictionary.h52
-rw-r--r--deps/v8/src/objects/fixed-array-inl.h107
-rw-r--r--deps/v8/src/objects/fixed-array.h30
-rw-r--r--deps/v8/src/objects/intl-objects.cc1
-rw-r--r--deps/v8/src/objects/js-array-inl.h29
-rw-r--r--deps/v8/src/objects/js-array.h13
-rw-r--r--deps/v8/src/objects/js-promise-inl.h40
-rw-r--r--deps/v8/src/objects/js-promise.h105
-rw-r--r--deps/v8/src/objects/js-regexp.h2
-rw-r--r--deps/v8/src/objects/literal-objects-inl.h6
-rw-r--r--deps/v8/src/objects/literal-objects.cc11
-rw-r--r--deps/v8/src/objects/map-inl.h1
-rw-r--r--deps/v8/src/objects/map.h2
-rw-r--r--deps/v8/src/objects/microtask-inl.h31
-rw-r--r--deps/v8/src/objects/microtask.h77
-rw-r--r--deps/v8/src/objects/module.h9
-rw-r--r--deps/v8/src/objects/name-inl.h21
-rw-r--r--deps/v8/src/objects/name.h13
-rw-r--r--deps/v8/src/objects/object-macros-undef.h2
-rw-r--r--deps/v8/src/objects/object-macros.h2
-rw-r--r--deps/v8/src/objects/promise-inl.h48
-rw-r--r--deps/v8/src/objects/promise.h168
-rw-r--r--deps/v8/src/objects/scope-info.cc41
-rw-r--r--deps/v8/src/objects/scope-info.h7
-rw-r--r--deps/v8/src/objects/shared-function-info-inl.h5
-rw-r--r--deps/v8/src/objects/shared-function-info.h20
-rw-r--r--deps/v8/src/objects/string.h2
-rw-r--r--deps/v8/src/objects/template-objects.cc138
-rw-r--r--deps/v8/src/objects/template-objects.h50
-rw-r--r--deps/v8/src/parsing/OWNERS1
-rw-r--r--deps/v8/src/parsing/background-parsing-task.cc112
-rw-r--r--deps/v8/src/parsing/background-parsing-task.h74
-rw-r--r--deps/v8/src/parsing/expression-classifier.h6
-rw-r--r--deps/v8/src/parsing/parse-info.cc2
-rw-r--r--deps/v8/src/parsing/parse-info.h14
-rw-r--r--deps/v8/src/parsing/parser-base.h207
-rw-r--r--deps/v8/src/parsing/parser.cc290
-rw-r--r--deps/v8/src/parsing/parser.h70
-rw-r--r--deps/v8/src/parsing/parsing.cc7
-rw-r--r--deps/v8/src/parsing/preparse-data-format.h32
-rw-r--r--deps/v8/src/parsing/preparse-data.cc43
-rw-r--r--deps/v8/src/parsing/preparse-data.h53
-rw-r--r--deps/v8/src/parsing/preparsed-scope-data.cc19
-rw-r--r--deps/v8/src/parsing/preparser.cc3
-rw-r--r--deps/v8/src/parsing/preparser.h80
-rw-r--r--deps/v8/src/parsing/scanner.cc127
-rw-r--r--deps/v8/src/parsing/scanner.h6
-rw-r--r--deps/v8/src/parsing/token.h2
-rw-r--r--deps/v8/src/perf-jit.cc62
-rw-r--r--deps/v8/src/perf-jit.h20
-rw-r--r--deps/v8/src/ppc/assembler-ppc-inl.h42
-rw-r--r--deps/v8/src/ppc/assembler-ppc.cc23
-rw-r--r--deps/v8/src/ppc/assembler-ppc.h19
-rw-r--r--deps/v8/src/ppc/code-stubs-ppc.cc5
-rw-r--r--deps/v8/src/ppc/codegen-ppc.cc5
-rw-r--r--deps/v8/src/ppc/disasm-ppc.cc1
-rw-r--r--deps/v8/src/ppc/frame-constants-ppc.h6
-rw-r--r--deps/v8/src/ppc/interface-descriptors-ppc.cc10
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.cc64
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.h46
-rw-r--r--deps/v8/src/ppc/simulator-ppc.cc141
-rw-r--r--deps/v8/src/ppc/simulator-ppc.h4
-rw-r--r--deps/v8/src/profiler/allocation-tracker.cc1
-rw-r--r--deps/v8/src/profiler/allocation-tracker.h2
-rw-r--r--deps/v8/src/profiler/cpu-profiler.cc3
-rw-r--r--deps/v8/src/profiler/heap-profiler.cc36
-rw-r--r--deps/v8/src/profiler/heap-profiler.h21
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc443
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.h23
-rw-r--r--deps/v8/src/profiler/profile-generator-inl.h5
-rw-r--r--deps/v8/src/profiler/profile-generator.cc74
-rw-r--r--deps/v8/src/profiler/profile-generator.h25
-rw-r--r--deps/v8/src/profiler/profiler-listener.cc51
-rw-r--r--deps/v8/src/profiler/profiler-listener.h8
-rw-r--r--deps/v8/src/profiler/sampling-heap-profiler.cc18
-rw-r--r--deps/v8/src/profiler/sampling-heap-profiler.h2
-rw-r--r--deps/v8/src/profiler/strings-storage.cc2
-rw-r--r--deps/v8/src/profiler/strings-storage.h2
-rw-r--r--deps/v8/src/profiler/tick-sample.cc7
-rw-r--r--deps/v8/src/profiler/tracing-cpu-profiler.h6
-rw-r--r--deps/v8/src/profiler/unbound-queue.h6
-rw-r--r--deps/v8/src/property.h6
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc37
-rw-r--r--deps/v8/src/regexp/interpreter-irregexp.cc2
-rw-r--r--deps/v8/src/regexp/jsregexp.cc32
-rw-r--r--deps/v8/src/regexp/mips/OWNERS3
-rw-r--r--deps/v8/src/regexp/mips64/OWNERS1
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h6
-rw-r--r--deps/v8/src/regexp/regexp-parser.cc22
-rw-r--r--deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h6
-rw-r--r--deps/v8/src/register-configuration.cc89
-rw-r--r--deps/v8/src/register-configuration.h9
-rw-r--r--deps/v8/src/runtime/runtime-array.cc14
-rw-r--r--deps/v8/src/runtime/runtime-atomics.cc24
-rw-r--r--deps/v8/src/runtime/runtime-bigint.cc7
-rw-r--r--deps/v8/src/runtime/runtime-collections.cc29
-rw-r--r--deps/v8/src/runtime/runtime-date.cc9
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc264
-rw-r--r--deps/v8/src/runtime/runtime-error.cc6
-rw-r--r--deps/v8/src/runtime/runtime-function.cc17
-rw-r--r--deps/v8/src/runtime/runtime-generator.cc36
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc81
-rw-r--r--deps/v8/src/runtime/runtime-interpreter.cc15
-rw-r--r--deps/v8/src/runtime/runtime-intl.cc4
-rw-r--r--deps/v8/src/runtime/runtime-module.cc18
-rw-r--r--deps/v8/src/runtime/runtime-object.cc154
-rw-r--r--deps/v8/src/runtime/runtime-operators.cc99
-rw-r--r--deps/v8/src/runtime/runtime-promise.cc115
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc8
-rw-r--r--deps/v8/src/runtime/runtime-scopes.cc36
-rw-r--r--deps/v8/src/runtime/runtime-strings.cc31
-rw-r--r--deps/v8/src/runtime/runtime-symbol.cc25
-rw-r--r--deps/v8/src/runtime/runtime-test.cc178
-rw-r--r--deps/v8/src/runtime/runtime-typedarray.cc81
-rw-r--r--deps/v8/src/runtime/runtime.h220
-rw-r--r--deps/v8/src/s390/assembler-s390-inl.h42
-rw-r--r--deps/v8/src/s390/assembler-s390.cc19
-rw-r--r--deps/v8/src/s390/assembler-s390.h17
-rw-r--r--deps/v8/src/s390/code-stubs-s390.cc5
-rw-r--r--deps/v8/src/s390/codegen-s390.cc5
-rw-r--r--deps/v8/src/s390/disasm-s390.cc1
-rw-r--r--deps/v8/src/s390/frame-constants-s390.h6
-rw-r--r--deps/v8/src/s390/interface-descriptors-s390.cc6
-rw-r--r--deps/v8/src/s390/macro-assembler-s390.cc53
-rw-r--r--deps/v8/src/s390/macro-assembler-s390.h40
-rw-r--r--deps/v8/src/s390/simulator-s390.cc9
-rw-r--r--deps/v8/src/s390/simulator-s390.h4
-rw-r--r--deps/v8/src/safepoint-table.cc2
-rw-r--r--deps/v8/src/simulator-base.cc50
-rw-r--r--deps/v8/src/simulator-base.h32
-rw-r--r--deps/v8/src/snapshot/builtin-deserializer.cc31
-rw-r--r--deps/v8/src/snapshot/builtin-deserializer.h3
-rw-r--r--deps/v8/src/snapshot/builtin-serializer.cc4
-rw-r--r--deps/v8/src/snapshot/builtin-serializer.h3
-rw-r--r--deps/v8/src/snapshot/code-serializer.cc20
-rw-r--r--deps/v8/src/snapshot/code-serializer.h32
-rw-r--r--deps/v8/src/snapshot/deserializer.cc36
-rw-r--r--deps/v8/src/snapshot/deserializer.h3
-rw-r--r--deps/v8/src/snapshot/object-deserializer.cc5
-rw-r--r--deps/v8/src/snapshot/partial-deserializer.cc2
-rw-r--r--deps/v8/src/snapshot/partial-serializer.cc3
-rw-r--r--deps/v8/src/snapshot/serializer-common.cc3
-rw-r--r--deps/v8/src/snapshot/serializer-common.h40
-rw-r--r--deps/v8/src/snapshot/serializer.cc9
-rw-r--r--deps/v8/src/snapshot/serializer.h13
-rw-r--r--deps/v8/src/snapshot/snapshot-common.cc48
-rw-r--r--deps/v8/src/snapshot/snapshot.h1
-rw-r--r--deps/v8/src/snapshot/startup-deserializer.cc3
-rw-r--r--deps/v8/src/snapshot/startup-serializer.cc16
-rw-r--r--deps/v8/src/snapshot/startup-serializer.h6
-rw-r--r--deps/v8/src/string-case.h2
-rw-r--r--deps/v8/src/third_party/vtune/v8vtune.gyp59
-rw-r--r--deps/v8/src/tracing/trace-event.h6
-rw-r--r--deps/v8/src/trap-handler/handler-outside.cc11
-rw-r--r--deps/v8/src/trap-handler/trap-handler-internal.h6
-rw-r--r--deps/v8/src/trap-handler/trap-handler.h6
-rw-r--r--deps/v8/src/unicode-decoder.cc128
-rw-r--r--deps/v8/src/unicode-decoder.h133
-rw-r--r--deps/v8/src/unicode.h4
-rw-r--r--deps/v8/src/utils.h13
-rw-r--r--deps/v8/src/v8.cc1
-rw-r--r--deps/v8/src/v8.gyp2630
-rw-r--r--deps/v8/src/v8.h5
-rw-r--r--deps/v8/src/v8memory.h6
-rw-r--r--deps/v8/src/value-serializer.cc68
-rw-r--r--deps/v8/src/value-serializer.h7
-rw-r--r--deps/v8/src/version.h6
-rw-r--r--deps/v8/src/visitors.cc23
-rw-r--r--deps/v8/src/visitors.h65
-rw-r--r--deps/v8/src/wasm/OWNERS2
-rw-r--r--deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h182
-rw-r--r--deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h182
-rw-r--r--deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h514
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler-defs.h61
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.cc348
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.h165
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.cc807
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-register.h129
-rw-r--r--deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h520
-rw-r--r--deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h465
-rw-r--r--deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h182
-rw-r--r--deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h182
-rw-r--r--deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h431
-rw-r--r--deps/v8/src/wasm/compilation-manager.cc10
-rw-r--r--deps/v8/src/wasm/compilation-manager.h11
-rw-r--r--deps/v8/src/wasm/function-body-decoder-impl.h90
-rw-r--r--deps/v8/src/wasm/function-body-decoder.cc23
-rw-r--r--deps/v8/src/wasm/module-compiler.cc607
-rw-r--r--deps/v8/src/wasm/module-compiler.h32
-rw-r--r--deps/v8/src/wasm/module-decoder.cc13
-rw-r--r--deps/v8/src/wasm/wasm-api.cc31
-rw-r--r--deps/v8/src/wasm/wasm-api.h35
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.cc163
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.h71
-rw-r--r--deps/v8/src/wasm/wasm-code-specialization.cc70
-rw-r--r--deps/v8/src/wasm/wasm-code-specialization.h17
-rw-r--r--deps/v8/src/wasm/wasm-code-wrapper.cc17
-rw-r--r--deps/v8/src/wasm/wasm-code-wrapper.h10
-rw-r--r--deps/v8/src/wasm/wasm-constants.h6
-rw-r--r--deps/v8/src/wasm/wasm-debug.cc21
-rw-r--r--deps/v8/src/wasm/wasm-engine.cc101
-rw-r--r--deps/v8/src/wasm/wasm-engine.h48
-rw-r--r--deps/v8/src/wasm/wasm-external-refs.h7
-rw-r--r--deps/v8/src/wasm/wasm-interpreter.cc181
-rw-r--r--deps/v8/src/wasm/wasm-interpreter.h7
-rw-r--r--deps/v8/src/wasm/wasm-js.cc97
-rw-r--r--deps/v8/src/wasm/wasm-js.h16
-rw-r--r--deps/v8/src/wasm/wasm-limits.h2
-rw-r--r--deps/v8/src/wasm/wasm-memory.cc93
-rw-r--r--deps/v8/src/wasm/wasm-memory.h6
-rw-r--r--deps/v8/src/wasm/wasm-module.cc8
-rw-r--r--deps/v8/src/wasm/wasm-module.h8
-rw-r--r--deps/v8/src/wasm/wasm-objects-inl.h6
-rw-r--r--deps/v8/src/wasm/wasm-objects.cc467
-rw-r--r--deps/v8/src/wasm/wasm-objects.h59
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.cc20
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.h43
-rw-r--r--deps/v8/src/wasm/wasm-result.h6
-rw-r--r--deps/v8/src/wasm/wasm-serialization.cc87
-rw-r--r--deps/v8/src/wasm/wasm-serialization.h6
-rw-r--r--deps/v8/src/wasm/wasm-text.cc3
-rw-r--r--deps/v8/src/wasm/wasm-text.h6
-rw-r--r--deps/v8/src/wasm/wasm-value.h6
-rw-r--r--deps/v8/src/x64/assembler-x64-inl.h134
-rw-r--r--deps/v8/src/x64/assembler-x64.cc763
-rw-r--r--deps/v8/src/x64/assembler-x64.h746
-rw-r--r--deps/v8/src/x64/code-stubs-x64.cc9
-rw-r--r--deps/v8/src/x64/codegen-x64.cc4
-rw-r--r--deps/v8/src/x64/disasm-x64.cc13
-rw-r--r--deps/v8/src/x64/frame-constants-x64.h6
-rw-r--r--deps/v8/src/x64/interface-descriptors-x64.cc7
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.cc441
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.h283
-rw-r--r--deps/v8/src/x64/sse-instr.h6
-rw-r--r--deps/v8/src/zone/accounting-allocator.h1
-rw-r--r--deps/v8/src/zone/zone-chunk-list.h281
-rw-r--r--deps/v8/src/zone/zone-containers.h6
-rw-r--r--deps/v8/src/zone/zone-handle-set.h4
770 files changed, 37851 insertions, 31778 deletions
diff --git a/deps/v8/src/PRESUBMIT.py b/deps/v8/src/PRESUBMIT.py
index d928a60689..b97eefaeb0 100644
--- a/deps/v8/src/PRESUBMIT.py
+++ b/deps/v8/src/PRESUBMIT.py
@@ -24,6 +24,6 @@ def PostUploadHook(cl, change, output_api):
return output_api.EnsureCQIncludeTrybotsAreAdded(
cl,
[
- 'master.tryserver.chromium.linux:linux_chromium_rel_ng'
+ 'luci.chromium.try:linux_chromium_rel_ng'
],
'Automatically added layout test trybots to run tests on CQ.')
diff --git a/deps/v8/src/allocation.cc b/deps/v8/src/allocation.cc
index e17de159c1..5493b34789 100644
--- a/deps/v8/src/allocation.cc
+++ b/deps/v8/src/allocation.cc
@@ -143,6 +143,8 @@ void* GetRandomMmapAddr() { return GetPageAllocator()->GetRandomMmapAddr(); }
void* AllocatePages(void* address, size_t size, size_t alignment,
PageAllocator::Permission access) {
+ DCHECK_EQ(address, AlignedAddress(address, alignment));
+ DCHECK_EQ(0UL, size & (GetPageAllocator()->AllocatePageSize() - 1));
void* result = nullptr;
for (int i = 0; i < kAllocationTries; ++i) {
result =
@@ -160,6 +162,7 @@ void* AllocatePages(void* address, size_t size, size_t alignment,
}
bool FreePages(void* address, const size_t size) {
+ DCHECK_EQ(0UL, size & (GetPageAllocator()->AllocatePageSize() - 1));
bool result = GetPageAllocator()->FreePages(address, size);
#if defined(LEAK_SANITIZER)
if (result) {
@@ -260,7 +263,9 @@ void VirtualMemory::Free() {
size_t size = size_;
CHECK(InVM(address, size));
Reset();
- CHECK(FreePages(address, size));
+ // FreePages expects size to be aligned to allocation granularity. Trimming
+ // may leave size at only commit granularity. Align it here.
+ CHECK(FreePages(address, RoundUp(size, AllocatePageSize())));
}
void VirtualMemory::TakeControl(VirtualMemory* from) {
diff --git a/deps/v8/src/api-arguments.cc b/deps/v8/src/api-arguments.cc
index 1302e32b66..502b8cbdca 100644
--- a/deps/v8/src/api-arguments.cc
+++ b/deps/v8/src/api-arguments.cc
@@ -13,13 +13,16 @@
namespace v8 {
namespace internal {
-Handle<Object> FunctionCallbackArguments::Call(FunctionCallback f) {
+Handle<Object> FunctionCallbackArguments::Call(CallHandlerInfo* handler) {
Isolate* isolate = this->isolate();
+ LOG(isolate, ApiObjectAccess("call", holder()));
+ RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kFunctionCallback);
+ v8::FunctionCallback f =
+ v8::ToCData<v8::FunctionCallback>(handler->callback());
if (isolate->needs_side_effect_check() &&
!isolate->debug()->PerformSideEffectCheckForCallback(FUNCTION_ADDR(f))) {
return Handle<Object>();
}
- RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kFunctionCallback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
FunctionCallbackInfo<v8::Value> info(begin(), argv_, argc_);
diff --git a/deps/v8/src/api-arguments.h b/deps/v8/src/api-arguments.h
index 42d58b88a8..413a72a3ae 100644
--- a/deps/v8/src/api-arguments.h
+++ b/deps/v8/src/api-arguments.h
@@ -19,7 +19,8 @@ template <int kArrayLength>
class CustomArgumentsBase : public Relocatable {
public:
virtual inline void IterateInstance(RootVisitor* v) {
- v->VisitRootPointers(Root::kRelocatable, values_, values_ + kArrayLength);
+ v->VisitRootPointers(Root::kRelocatable, nullptr, values_,
+ values_ + kArrayLength);
}
protected:
@@ -215,9 +216,13 @@ class FunctionCallbackArguments
* and used if it's been set to anything inside the callback.
* New style callbacks always use the return value.
*/
- Handle<Object> Call(FunctionCallback f);
+ Handle<Object> Call(CallHandlerInfo* handler);
private:
+ inline JSObject* holder() {
+ return JSObject::cast(this->begin()[T::kHolderIndex]);
+ }
+
internal::Object** argv_;
int argc_;
};
diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc
index b8f03a89a8..488b99fd25 100644
--- a/deps/v8/src/api-natives.cc
+++ b/deps/v8/src/api-natives.cc
@@ -285,10 +285,10 @@ MaybeHandle<JSObject> ProbeInstantiationsCache(Isolate* isolate,
} else if (caching_mode == CachingMode::kUnlimited ||
(serial_number <=
TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
- Handle<NumberDictionary> slow_cache =
+ Handle<SimpleNumberDictionary> slow_cache =
isolate->slow_template_instantiations_cache();
int entry = slow_cache->FindEntry(serial_number);
- if (entry == NumberDictionary::kNotFound) {
+ if (entry == SimpleNumberDictionary::kNotFound) {
return MaybeHandle<JSObject>();
}
return handle(JSObject::cast(slow_cache->ValueAt(entry)), isolate);
@@ -313,9 +313,9 @@ void CacheTemplateInstantiation(Isolate* isolate, int serial_number,
} else if (caching_mode == CachingMode::kUnlimited ||
(serial_number <=
TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
- Handle<NumberDictionary> cache =
+ Handle<SimpleNumberDictionary> cache =
isolate->slow_template_instantiations_cache();
- auto new_cache = NumberDictionary::Set(cache, serial_number, object);
+ auto new_cache = SimpleNumberDictionary::Set(cache, serial_number, object);
if (*new_cache != *cache) {
isolate->native_context()->set_slow_template_instantiations_cache(
*new_cache);
@@ -334,11 +334,11 @@ void UncacheTemplateInstantiation(Isolate* isolate, int serial_number,
} else if (caching_mode == CachingMode::kUnlimited ||
(serial_number <=
TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
- Handle<NumberDictionary> cache =
+ Handle<SimpleNumberDictionary> cache =
isolate->slow_template_instantiations_cache();
int entry = cache->FindEntry(serial_number);
- DCHECK_NE(NumberDictionary::kNotFound, entry);
- cache = NumberDictionary::DeleteEntry(cache, entry);
+ DCHECK_NE(SimpleNumberDictionary::kNotFound, entry);
+ cache = SimpleNumberDictionary::DeleteEntry(cache, entry);
isolate->native_context()->set_slow_template_instantiations_cache(*cache);
}
}
@@ -726,7 +726,6 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
// Mark instance as callable in the map.
if (!obj->instance_call_handler()->IsUndefined(isolate)) {
map->set_is_callable(true);
- map->set_is_constructor(true);
}
if (immutable_proto) map->set_is_immutable_proto(true);
diff --git a/deps/v8/src/api-natives.h b/deps/v8/src/api-natives.h
index 455be0dd06..398f198ae5 100644
--- a/deps/v8/src/api-natives.h
+++ b/deps/v8/src/api-natives.h
@@ -5,6 +5,8 @@
#ifndef V8_API_NATIVES_H_
#define V8_API_NATIVES_H_
+#include "include/v8.h"
+#include "src/base/macros.h"
#include "src/handles.h"
#include "src/property-details.h"
@@ -62,4 +64,4 @@ class ApiNatives {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_API_NATIVES_H_
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index d258c87853..8531cd5c05 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -11,7 +11,6 @@
#include <cmath> // For isnan.
#include <limits>
#include <vector>
-#include "include/v8-debug.h"
#include "include/v8-profiler.h"
#include "include/v8-testing.h"
#include "include/v8-util.h"
@@ -34,6 +33,7 @@
#include "src/conversions-inl.h"
#include "src/counters.h"
#include "src/debug/debug-coverage.h"
+#include "src/debug/debug-evaluate.h"
#include "src/debug/debug-type-profile.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
@@ -49,7 +49,6 @@
#include "src/json-stringifier.h"
#include "src/messages.h"
#include "src/objects-inl.h"
-#include "src/parsing/background-parsing-task.h"
#include "src/parsing/parser.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/pending-compilation-error-handler.h"
@@ -461,16 +460,7 @@ void* v8::ArrayBuffer::Allocator::Reserve(size_t length) { UNIMPLEMENTED(); }
void v8::ArrayBuffer::Allocator::Free(void* data, size_t length,
AllocationMode mode) {
- switch (mode) {
- case AllocationMode::kNormal: {
- Free(data, length);
- return;
- }
- case AllocationMode::kReservation: {
- UNIMPLEMENTED();
- return;
- }
- }
+ UNIMPLEMENTED();
}
void v8::ArrayBuffer::Allocator::SetProtection(
@@ -483,7 +473,7 @@ namespace {
class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
- virtual void* Allocate(size_t length) {
+ void* Allocate(size_t length) override {
#if V8_OS_AIX && _LINUX_SOURCE_COMPAT
// Work around for GCC bug on AIX
// See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79839
@@ -494,7 +484,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
return data;
}
- virtual void* AllocateUninitialized(size_t length) {
+ void* AllocateUninitialized(size_t length) override {
#if V8_OS_AIX && _LINUX_SOURCE_COMPAT
// Work around for GCC bug on AIX
// See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79839
@@ -505,42 +495,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
return data;
}
- virtual void Free(void* data, size_t) { free(data); }
-
- virtual void* Reserve(size_t length) {
- size_t page_size = i::AllocatePageSize();
- size_t allocated = RoundUp(length, page_size);
- void* address = i::AllocatePages(i::GetRandomMmapAddr(), allocated,
- page_size, PageAllocator::kNoAccess);
- return address;
- }
-
- virtual void Free(void* data, size_t length,
- v8::ArrayBuffer::Allocator::AllocationMode mode) {
- switch (mode) {
- case v8::ArrayBuffer::Allocator::AllocationMode::kNormal: {
- return Free(data, length);
- }
- case v8::ArrayBuffer::Allocator::AllocationMode::kReservation: {
- size_t page_size = i::AllocatePageSize();
- size_t allocated = RoundUp(length, page_size);
- CHECK(i::FreePages(data, allocated));
- return;
- }
- }
- }
-
- virtual void SetProtection(
- void* data, size_t length,
- v8::ArrayBuffer::Allocator::Protection protection) {
- DCHECK(protection == v8::ArrayBuffer::Allocator::Protection::kNoAccess ||
- protection == v8::ArrayBuffer::Allocator::Protection::kReadWrite);
- PageAllocator::Permission permission =
- (protection == v8::ArrayBuffer::Allocator::Protection::kReadWrite)
- ? PageAllocator::kReadWrite
- : PageAllocator::kNoAccess;
- CHECK(i::SetPermissions(data, length, permission));
- }
+ void Free(void* data, size_t) override { free(data); }
};
bool RunExtraCode(Isolate* isolate, Local<Context> context,
@@ -1069,6 +1024,10 @@ void* V8::ClearWeak(i::Object** location) {
return i::GlobalHandles::ClearWeakness(location);
}
+void V8::AnnotateStrongRetainer(i::Object** location, const char* label) {
+ i::GlobalHandles::AnnotateStrongRetainer(location, label);
+}
+
void V8::DisposeGlobal(i::Object** location) {
i::GlobalHandles::Destroy(location);
}
@@ -2069,11 +2028,9 @@ bool ScriptCompiler::ExternalSourceStream::SetBookmark() { return false; }
void ScriptCompiler::ExternalSourceStream::ResetToBookmark() { UNREACHABLE(); }
-
ScriptCompiler::StreamedSource::StreamedSource(ExternalSourceStream* stream,
Encoding encoding)
- : impl_(new i::StreamedSource(stream, encoding)) {}
-
+ : impl_(new i::ScriptStreamingData(stream, encoding)) {}
ScriptCompiler::StreamedSource::~StreamedSource() { delete impl_; }
@@ -2358,6 +2315,37 @@ MaybeLocal<Value> Module::Evaluate(Local<Context> context) {
RETURN_ESCAPED(result);
}
+namespace {
+
+i::Compiler::ScriptDetails GetScriptDetails(
+ i::Isolate* isolate, Local<Value> resource_name,
+ Local<Integer> resource_line_offset, Local<Integer> resource_column_offset,
+ Local<Value> source_map_url, Local<PrimitiveArray> host_defined_options) {
+ i::Compiler::ScriptDetails script_details;
+ if (!resource_name.IsEmpty()) {
+ script_details.name_obj = Utils::OpenHandle(*(resource_name));
+ }
+ if (!resource_line_offset.IsEmpty()) {
+ script_details.line_offset =
+ static_cast<int>(resource_line_offset->Value());
+ }
+ if (!resource_column_offset.IsEmpty()) {
+ script_details.column_offset =
+ static_cast<int>(resource_column_offset->Value());
+ }
+ script_details.host_defined_options = isolate->factory()->empty_fixed_array();
+ if (!host_defined_options.IsEmpty()) {
+ script_details.host_defined_options =
+ Utils::OpenHandle(*(host_defined_options));
+ }
+ if (!source_map_url.IsEmpty()) {
+ script_details.source_map_url = Utils::OpenHandle(*(source_map_url));
+ }
+ return script_details;
+}
+
+} // namespace
+
MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
Isolate* v8_isolate, Source* source, CompileOptions options,
NoCacheReason no_cache_reason) {
@@ -2366,17 +2354,21 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
ENTER_V8_NO_SCRIPT(isolate, v8_isolate->GetCurrentContext(), ScriptCompiler,
CompileUnbound, MaybeLocal<UnboundScript>(),
InternalEscapableScope);
- bool produce_cache = options == kProduceParserCache ||
- options == kProduceCodeCache ||
- options == kProduceFullCodeCache;
-
- // Don't try to produce any kind of cache when the debugger is loaded.
- if (isolate->debug()->is_loaded() && produce_cache) {
+ // ProduceParserCache, ProduceCodeCache, ProduceFullCodeCache and
+ // ConsumeParserCache are not supported. They are present only for
+ // backward compatability. All these options behave as kNoCompileOptions.
+ if (options == kConsumeParserCache) {
+ // We do not support parser caches anymore. Just set cached_data to
+ // rejected to signal an error.
+ options = kNoCompileOptions;
+ source->cached_data->rejected = true;
+ } else if (options == kProduceParserCache || options == kProduceCodeCache ||
+ options == kProduceFullCodeCache) {
options = kNoCompileOptions;
}
i::ScriptData* script_data = nullptr;
- if (options == kConsumeParserCache || options == kConsumeCodeCache) {
+ if (options == kConsumeCodeCache) {
DCHECK(source->cached_data);
// ScriptData takes care of pointer-aligning the data.
script_data = new i::ScriptData(source->cached_data->data,
@@ -2386,32 +2378,14 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
i::Handle<i::String> str = Utils::OpenHandle(*(source->source_string));
i::Handle<i::SharedFunctionInfo> result;
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileScript");
- i::MaybeHandle<i::Object> name_obj;
- i::MaybeHandle<i::Object> source_map_url;
- i::MaybeHandle<i::FixedArray> host_defined_options =
- isolate->factory()->empty_fixed_array();
- int line_offset = 0;
- int column_offset = 0;
- if (!source->resource_name.IsEmpty()) {
- name_obj = Utils::OpenHandle(*(source->resource_name));
- }
- if (!source->host_defined_options.IsEmpty()) {
- host_defined_options = Utils::OpenHandle(*(source->host_defined_options));
- }
- if (!source->resource_line_offset.IsEmpty()) {
- line_offset = static_cast<int>(source->resource_line_offset->Value());
- }
- if (!source->resource_column_offset.IsEmpty()) {
- column_offset = static_cast<int>(source->resource_column_offset->Value());
- }
- if (!source->source_map_url.IsEmpty()) {
- source_map_url = Utils::OpenHandle(*(source->source_map_url));
- }
+ i::Compiler::ScriptDetails script_details = GetScriptDetails(
+ isolate, source->resource_name, source->resource_line_offset,
+ source->resource_column_offset, source->source_map_url,
+ source->host_defined_options);
i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
i::Compiler::GetSharedFunctionInfoForScript(
- str, name_obj, line_offset, column_offset, source->resource_options,
- source_map_url, isolate->native_context(), nullptr, &script_data,
- options, no_cache_reason, i::NOT_NATIVES_CODE, host_defined_options);
+ str, script_details, source->resource_options, nullptr, &script_data,
+ options, no_cache_reason, i::NOT_NATIVES_CODE);
has_pending_exception = !maybe_function_info.ToHandle(&result);
if (has_pending_exception && script_data != nullptr) {
// This case won't happen during normal operation; we have compiled
@@ -2422,13 +2396,7 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
}
RETURN_ON_FAILED_EXECUTION(UnboundScript);
- if (produce_cache && script_data != nullptr) {
- // script_data now contains the data that was generated. source will
- // take the ownership.
- source->cached_data = new CachedData(
- script_data->data(), script_data->length(), CachedData::BufferOwned);
- script_data->ReleaseDataOwnership();
- } else if (options == kConsumeParserCache || options == kConsumeCodeCache) {
+ if (options == kConsumeCodeCache) {
source->cached_data->rejected = script_data->rejected();
}
delete script_data;
@@ -2593,9 +2561,11 @@ ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreamingScript(
if (!i::FLAG_script_streaming) {
return nullptr;
}
+ // We don't support other compile options on streaming background compiles.
+ // TODO(rmcilroy): remove CompileOptions from the API.
+ CHECK(options == ScriptCompiler::kNoCompileOptions);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- return new i::BackgroundParsingTask(source->impl(), options,
- i::FLAG_stack_size, isolate);
+ return i::Compiler::NewBackgroundCompileTask(source->impl(), isolate);
}
@@ -2605,59 +2575,24 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
const ScriptOrigin& origin) {
PREPARE_FOR_EXECUTION(context, ScriptCompiler, Compile, Script);
TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.ScriptCompiler");
- i::StreamedSource* source = v8_source->impl();
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.CompileStreamedScript");
+
i::Handle<i::String> str = Utils::OpenHandle(*(full_source_string));
- i::Handle<i::Script> script = isolate->factory()->NewScript(str);
- if (isolate->NeedsSourcePositionsForProfiling()) {
- i::Script::InitLineEnds(script);
- }
- if (!origin.ResourceName().IsEmpty()) {
- script->set_name(*Utils::OpenHandle(*(origin.ResourceName())));
- }
- if (!origin.HostDefinedOptions().IsEmpty()) {
- script->set_host_defined_options(
- *Utils::OpenHandle(*(origin.HostDefinedOptions())));
- }
- if (!origin.ResourceLineOffset().IsEmpty()) {
- script->set_line_offset(
- static_cast<int>(origin.ResourceLineOffset()->Value()));
- }
- if (!origin.ResourceColumnOffset().IsEmpty()) {
- script->set_column_offset(
- static_cast<int>(origin.ResourceColumnOffset()->Value()));
- }
- script->set_origin_options(origin.Options());
- if (!origin.SourceMapUrl().IsEmpty()) {
- script->set_source_mapping_url(
- *Utils::OpenHandle(*(origin.SourceMapUrl())));
- }
+ i::Compiler::ScriptDetails script_details = GetScriptDetails(
+ isolate, origin.ResourceName(), origin.ResourceLineOffset(),
+ origin.ResourceColumnOffset(), origin.SourceMapUrl(),
+ origin.HostDefinedOptions());
+ i::ScriptStreamingData* streaming_data = v8_source->impl();
- source->info->set_script(script);
- source->parser->UpdateStatistics(isolate, script);
- source->info->UpdateBackgroundParseStatisticsOnMainThread(isolate);
- source->parser->HandleSourceURLComments(isolate, script);
+ i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
+ i::Compiler::GetSharedFunctionInfoForStreamedScript(
+ str, script_details, origin.Options(), streaming_data);
i::Handle<i::SharedFunctionInfo> result;
- if (source->info->literal() == nullptr) {
- // Parsing has failed - report error messages.
- source->info->pending_error_handler()->ReportErrors(
- isolate, script, source->info->ast_value_factory());
- } else {
- // Parsing has succeeded - finalize compile.
- if (i::FLAG_background_compile) {
- result = i::Compiler::GetSharedFunctionInfoForBackgroundCompile(
- script, source->info.get(), str->length(),
- source->outer_function_job.get(), &source->inner_function_jobs);
- } else {
- result = i::Compiler::GetSharedFunctionInfoForStreamedScript(
- script, source->info.get(), str->length());
- }
- }
- has_pending_exception = result.is_null();
+ has_pending_exception = !maybe_function_info.ToHandle(&result);
if (has_pending_exception) isolate->ReportPendingMessages();
- source->Release();
-
RETURN_ON_FAILED_EXECUTION(Script);
Local<UnboundScript> generic = ToApiHandle<UnboundScript>(result);
@@ -3304,6 +3239,16 @@ MaybeLocal<WasmCompiledModule> ValueDeserializer::Delegate::GetWasmModuleFromId(
return MaybeLocal<WasmCompiledModule>();
}
+MaybeLocal<SharedArrayBuffer>
+ValueDeserializer::Delegate::GetSharedArrayBufferFromId(Isolate* v8_isolate,
+ uint32_t id) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ isolate->ScheduleThrow(*isolate->factory()->NewError(
+ isolate->error_function(),
+ i::MessageTemplate::kDataCloneDeserializationError));
+ return MaybeLocal<SharedArrayBuffer>();
+}
+
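A rough sketch of an embedder delegate overriding the new hook, assuming the embedder recorded transferred buffers by id during serialization (the class name and map are illustrative):

  #include <map>
  #include "v8.h"  // assumed include path

  class MyDeserializerDelegate : public v8::ValueDeserializer::Delegate {
   public:
    v8::MaybeLocal<v8::SharedArrayBuffer> GetSharedArrayBufferFromId(
        v8::Isolate* isolate, uint32_t clone_id) override {
      auto it = shared_buffers_.find(clone_id);
      // An empty handle signals failure to the deserializer, much like the
      // default implementation above.
      if (it == shared_buffers_.end()) return {};
      return it->second.Get(isolate);
    }

   private:
    std::map<uint32_t, v8::Global<v8::SharedArrayBuffer>> shared_buffers_;
  };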
struct ValueDeserializer::PrivateData {
PrivateData(i::Isolate* i, i::Vector<const uint8_t> data, Delegate* delegate)
: isolate(i), deserializer(i, data, delegate) {}
@@ -3544,24 +3489,22 @@ bool Value::IsWebAssemblyCompiledModule() const {
js_obj->map()->GetConstructor();
}
-#define VALUE_IS_SPECIFIC_TYPE(Type, Class) \
- bool Value::Is##Type() const { \
- i::Handle<i::Object> obj = Utils::OpenHandle(this); \
- if (!obj->IsHeapObject()) return false; \
- i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate(); \
- return obj->HasSpecificClassOf(isolate->heap()->Class##_string()); \
+#define VALUE_IS_SPECIFIC_TYPE(Type, Check) \
+ bool Value::Is##Type() const { \
+ i::Handle<i::Object> obj = Utils::OpenHandle(this); \
+ return obj->Is##Check(); \
}
-VALUE_IS_SPECIFIC_TYPE(ArgumentsObject, Arguments)
-VALUE_IS_SPECIFIC_TYPE(BooleanObject, Boolean)
-VALUE_IS_SPECIFIC_TYPE(NumberObject, Number)
-VALUE_IS_SPECIFIC_TYPE(StringObject, String)
-VALUE_IS_SPECIFIC_TYPE(SymbolObject, Symbol)
-VALUE_IS_SPECIFIC_TYPE(Date, Date)
-VALUE_IS_SPECIFIC_TYPE(Map, Map)
-VALUE_IS_SPECIFIC_TYPE(Set, Set)
-VALUE_IS_SPECIFIC_TYPE(WeakMap, WeakMap)
-VALUE_IS_SPECIFIC_TYPE(WeakSet, WeakSet)
+VALUE_IS_SPECIFIC_TYPE(ArgumentsObject, JSArgumentsObject)
+VALUE_IS_SPECIFIC_TYPE(BooleanObject, BooleanWrapper)
+VALUE_IS_SPECIFIC_TYPE(NumberObject, NumberWrapper)
+VALUE_IS_SPECIFIC_TYPE(StringObject, StringWrapper)
+VALUE_IS_SPECIFIC_TYPE(SymbolObject, SymbolWrapper)
+VALUE_IS_SPECIFIC_TYPE(Date, JSDate)
+VALUE_IS_SPECIFIC_TYPE(Map, JSMap)
+VALUE_IS_SPECIFIC_TYPE(Set, JSSet)
+VALUE_IS_SPECIFIC_TYPE(WeakMap, JSWeakMap)
+VALUE_IS_SPECIFIC_TYPE(WeakSet, JSWeakSet)
#undef VALUE_IS_SPECIFIC_TYPE
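For reference, the rewritten macro now expands to a plain internal type predicate; for Map, for instance, it is equivalent to:

  bool Value::IsMap() const {
    i::Handle<i::Object> obj = Utils::OpenHandle(this);
    return obj->IsJSMap();
  }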
@@ -3953,55 +3896,36 @@ void v8::SharedArrayBuffer::CheckCast(Value* that) {
void v8::Date::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- i::Isolate* isolate = nullptr;
- if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate();
- Utils::ApiCheck(isolate != nullptr &&
- obj->HasSpecificClassOf(isolate->heap()->Date_string()),
- "v8::Date::Cast()", "Could not convert to date");
+ Utils::ApiCheck(obj->IsJSDate(), "v8::Date::Cast()",
+ "Could not convert to date");
}
void v8::StringObject::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- i::Isolate* isolate = nullptr;
- if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate();
- Utils::ApiCheck(isolate != nullptr &&
- obj->HasSpecificClassOf(isolate->heap()->String_string()),
- "v8::StringObject::Cast()",
+ Utils::ApiCheck(obj->IsStringWrapper(), "v8::StringObject::Cast()",
"Could not convert to StringObject");
}
void v8::SymbolObject::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- i::Isolate* isolate = nullptr;
- if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate();
- Utils::ApiCheck(isolate != nullptr &&
- obj->HasSpecificClassOf(isolate->heap()->Symbol_string()),
- "v8::SymbolObject::Cast()",
+ Utils::ApiCheck(obj->IsSymbolWrapper(), "v8::SymbolObject::Cast()",
"Could not convert to SymbolObject");
}
void v8::NumberObject::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- i::Isolate* isolate = nullptr;
- if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate();
- Utils::ApiCheck(isolate != nullptr &&
- obj->HasSpecificClassOf(isolate->heap()->Number_string()),
- "v8::NumberObject::Cast()",
+ Utils::ApiCheck(obj->IsNumberWrapper(), "v8::NumberObject::Cast()",
"Could not convert to NumberObject");
}
void v8::BooleanObject::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- i::Isolate* isolate = nullptr;
- if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate();
- Utils::ApiCheck(
- isolate != nullptr &&
- obj->HasSpecificClassOf(isolate->heap()->Boolean_string()),
- "v8::BooleanObject::Cast()", "Could not convert to BooleanObject");
+ Utils::ApiCheck(obj->IsBooleanWrapper(), "v8::BooleanObject::Cast()",
+ "Could not convert to BooleanObject");
}
@@ -4432,7 +4356,7 @@ Maybe<bool> v8::Object::SetPrivate(Local<Context> context, Local<Private> key,
desc.set_enumerable(false);
desc.set_configurable(true);
desc.set_value(value_obj);
- return i::JSProxy::SetPrivateProperty(
+ return i::JSProxy::SetPrivateSymbol(
isolate, i::Handle<i::JSProxy>::cast(self),
i::Handle<i::Symbol>::cast(key_obj), &desc, i::kDontThrow);
}
@@ -4577,10 +4501,10 @@ MaybeLocal<Array> v8::Object::GetPropertyNames(Local<Context> context) {
v8::IndexFilter::kIncludeIndices);
}
-MaybeLocal<Array> v8::Object::GetPropertyNames(Local<Context> context,
- KeyCollectionMode mode,
- PropertyFilter property_filter,
- IndexFilter index_filter) {
+MaybeLocal<Array> v8::Object::GetPropertyNames(
+ Local<Context> context, KeyCollectionMode mode,
+ PropertyFilter property_filter, IndexFilter index_filter,
+ KeyConversionMode key_conversion) {
PREPARE_FOR_EXECUTION(context, Object, GetPropertyNames, Array);
auto self = Utils::OpenHandle(this);
i::Handle<i::FixedArray> value;
@@ -4590,7 +4514,8 @@ MaybeLocal<Array> v8::Object::GetPropertyNames(Local<Context> context,
accumulator.set_skip_indices(index_filter == IndexFilter::kSkipIndices);
has_pending_exception = accumulator.CollectKeys(self, self).IsNothing();
RETURN_ON_FAILED_EXECUTION(Array);
- value = accumulator.GetKeys(i::GetKeysConversion::kKeepNumbers);
+ value =
+ accumulator.GetKeys(static_cast<i::GetKeysConversion>(key_conversion));
DCHECK(self->map()->EnumLength() == i::kInvalidEnumCacheSentinel ||
self->map()->EnumLength() == 0 ||
self->map()->instance_descriptors()->GetEnumCache()->keys() != *value);
@@ -4614,10 +4539,11 @@ Local<Array> v8::Object::GetOwnPropertyNames() {
RETURN_TO_LOCAL_UNCHECKED(GetOwnPropertyNames(context), Array);
}
-MaybeLocal<Array> v8::Object::GetOwnPropertyNames(Local<Context> context,
- PropertyFilter filter) {
+MaybeLocal<Array> v8::Object::GetOwnPropertyNames(
+ Local<Context> context, PropertyFilter filter,
+ KeyConversionMode key_conversion) {
return GetPropertyNames(context, KeyCollectionMode::kOwnOnly, filter,
- v8::IndexFilter::kIncludeIndices);
+ v8::IndexFilter::kIncludeIndices, key_conversion);
}
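An illustrative call using the new conversion mode, assuming obj and context are in scope; with kConvertToString, integer indices come back as strings rather than numbers:

  v8::Local<v8::Array> keys =
      obj->GetOwnPropertyNames(context, v8::ONLY_ENUMERABLE,
                               v8::KeyConversionMode::kConvertToString)
          .ToLocalChecked();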
MaybeLocal<String> v8::Object::ObjectProtoToString(Local<Context> context) {
@@ -4754,14 +4680,14 @@ Maybe<bool> v8::Object::Has(Local<Context> context, uint32_t index) {
return maybe;
}
-
template <typename Getter, typename Setter, typename Data>
static Maybe<bool> ObjectSetAccessor(Local<Context> context, Object* self,
Local<Name> name, Getter getter,
Setter setter, Data data,
AccessControl settings,
PropertyAttribute attributes,
- bool is_special_data_property) {
+ bool is_special_data_property,
+ bool replace_on_access) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
ENTER_V8_NO_SCRIPT(isolate, context, Object, SetAccessor, Nothing<bool>(),
i::HandleScope);
@@ -4771,7 +4697,7 @@ static Maybe<bool> ObjectSetAccessor(Local<Context> context, Object* self,
v8::Local<AccessorSignature> signature;
i::Handle<i::AccessorInfo> info =
MakeAccessorInfo(isolate, name, getter, setter, data, settings, signature,
- is_special_data_property, false);
+ is_special_data_property, replace_on_access);
if (info.is_null()) return Nothing<bool>();
bool fast = obj->HasFastProperties();
i::Handle<i::Object> result;
@@ -4797,7 +4723,7 @@ Maybe<bool> Object::SetAccessor(Local<Context> context, Local<Name> name,
PropertyAttribute attribute) {
return ObjectSetAccessor(context, this, name, getter, setter,
data.FromMaybe(Local<Value>()), settings, attribute,
- i::FLAG_disable_old_api_accessors);
+ i::FLAG_disable_old_api_accessors, false);
}
@@ -4827,7 +4753,17 @@ Maybe<bool> Object::SetNativeDataProperty(v8::Local<v8::Context> context,
v8::Local<Value> data,
PropertyAttribute attributes) {
return ObjectSetAccessor(context, this, name, getter, setter, data, DEFAULT,
- attributes, true);
+ attributes, true, false);
+}
+
+Maybe<bool> Object::SetLazyDataProperty(v8::Local<v8::Context> context,
+ v8::Local<Name> name,
+ AccessorNameGetterCallback getter,
+ v8::Local<Value> data,
+ PropertyAttribute attributes) {
+ return ObjectSetAccessor(context, this, name, getter,
+ static_cast<AccessorNameSetterCallback>(nullptr),
+ data, DEFAULT, attributes, true, true);
}
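A hedged usage sketch of the new entry point; the getter, property name and value are made up, and an entered isolate/context is assumed:

  static void LazyGetter(v8::Local<v8::Name> name,
                         const v8::PropertyCallbackInfo<v8::Value>& info) {
    // Runs only on the first read; the result then replaces the accessor with
    // an ordinary data property (replace_on_access == true on this path).
    info.GetReturnValue().Set(v8::Number::New(info.GetIsolate(), 42));
  }

  // ... with v8::Local<v8::Object> obj and v8::Local<v8::Context> context:
  obj->SetLazyDataProperty(
         context,
         v8::String::NewFromUtf8(isolate, "answer",
                                 v8::NewStringType::kNormal)
             .ToLocalChecked(),
         LazyGetter, v8::Local<v8::Value>(), v8::None)
      .FromJust();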
Maybe<bool> v8::Object::HasOwnProperty(Local<Context> context,
@@ -7352,13 +7288,11 @@ Local<Array> Set::AsArray() const {
MaybeLocal<Promise::Resolver> Promise::Resolver::New(Local<Context> context) {
PREPARE_FOR_EXECUTION(context, Promise_Resolver, New, Resolver);
- i::Handle<i::Object> result;
+ Local<Promise::Resolver> result;
has_pending_exception =
- !i::Execution::Call(isolate, isolate->promise_internal_constructor(),
- isolate->factory()->undefined_value(), 0, nullptr)
- .ToHandle(&result);
+ !ToLocal<Promise::Resolver>(isolate->factory()->NewJSPromise(), &result);
RETURN_ON_FAILED_EXECUTION(Promise::Resolver);
- RETURN_ESCAPED(Local<Promise::Resolver>::Cast(Utils::ToLocal(result)));
+ RETURN_ESCAPED(result);
}
@@ -7380,12 +7314,14 @@ Maybe<bool> Promise::Resolver::Resolve(Local<Context> context,
ENTER_V8(isolate, context, Promise_Resolver, Resolve, Nothing<bool>(),
i::HandleScope);
auto self = Utils::OpenHandle(this);
- i::Handle<i::Object> argv[] = {self, Utils::OpenHandle(*value)};
+ auto promise = i::Handle<i::JSPromise>::cast(self);
+
+ if (promise->status() != Promise::kPending) {
+ return Just(true);
+ }
+
has_pending_exception =
- i::Execution::Call(isolate, isolate->promise_resolve(),
- isolate->factory()->undefined_value(), arraysize(argv),
- argv)
- .is_null();
+ i::JSPromise::Resolve(promise, Utils::OpenHandle(*value)).is_null();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(true);
}
@@ -7403,15 +7339,14 @@ Maybe<bool> Promise::Resolver::Reject(Local<Context> context,
ENTER_V8(isolate, context, Promise_Resolver, Reject, Nothing<bool>(),
i::HandleScope);
auto self = Utils::OpenHandle(this);
+ auto promise = i::Handle<i::JSPromise>::cast(self);
+
+ if (promise->status() != Promise::kPending) {
+ return Just(true);
+ }
- // We pass true to trigger the debugger's on exception handler.
- i::Handle<i::Object> argv[] = {self, Utils::OpenHandle(*value),
- isolate->factory()->ToBoolean(true)};
has_pending_exception =
- i::Execution::Call(isolate, isolate->promise_internal_reject(),
- isolate->factory()->undefined_value(), arraysize(argv),
- argv)
- .is_null();
+ i::JSPromise::Reject(promise, Utils::OpenHandle(*value)).is_null();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(true);
}
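With the early status check, settling an already-settled resolver is now a quiet success; a small sketch assuming isolate and context are in scope:

  v8::Local<v8::Promise::Resolver> resolver =
      v8::Promise::Resolver::New(context).ToLocalChecked();
  resolver->Resolve(context, v8::Integer::New(isolate, 1)).FromJust();
  // The promise is no longer pending, so this returns Just(true) without
  // changing the promise's state or value.
  resolver->Reject(context, v8::Integer::New(isolate, 2)).FromJust();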
@@ -7605,8 +7540,9 @@ MaybeLocal<WasmCompiledModule> WasmCompiledModule::Compile(Isolate* isolate,
if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) {
return MaybeLocal<WasmCompiledModule>();
}
- i::MaybeHandle<i::JSObject> maybe_compiled = i::wasm::SyncCompile(
- i_isolate, &thrower, i::wasm::ModuleWireBytes(start, start + length));
+ i::MaybeHandle<i::JSObject> maybe_compiled =
+ i_isolate->wasm_engine()->SyncCompile(
+ i_isolate, &thrower, i::wasm::ModuleWireBytes(start, start + length));
if (maybe_compiled.is_null()) return MaybeLocal<WasmCompiledModule>();
return Local<WasmCompiledModule>::Cast(
Utils::ToLocal(maybe_compiled.ToHandleChecked()));
@@ -7665,30 +7601,29 @@ void WasmModuleObjectBuilderStreaming::Finish() {
}
// AsyncCompile makes its own copy of the wire bytes. This inefficiency
// will be resolved when we move to true streaming compilation.
- i::wasm::AsyncCompile(reinterpret_cast<i::Isolate*>(isolate_),
- Utils::OpenHandle(*promise_.Get(isolate_)),
- {wire_bytes.get(), wire_bytes.get() + total_size_},
- false);
+ auto i_isolate = reinterpret_cast<i::Isolate*>(isolate_);
+ i_isolate->wasm_engine()->AsyncCompile(
+ i_isolate, Utils::OpenHandle(*promise_.Get(isolate_)),
+ {wire_bytes.get(), wire_bytes.get() + total_size_}, false);
}
-void WasmModuleObjectBuilderStreaming::Abort(Local<Value> exception) {
+void WasmModuleObjectBuilderStreaming::Abort(MaybeLocal<Value> exception) {
Local<Promise> promise = GetPromise();
// The promise has already been resolved, e.g. because of a compilation
// error.
if (promise->State() != v8::Promise::kPending) return;
if (i::FLAG_wasm_stream_compilation) streaming_decoder_->Abort();
- // If there is no exception, then we do not reject the promise. The reason is
- // that 'no exception' indicates that we are in a ScriptForbiddenScope, which
- // means that it is not allowed to reject the promise at the moment, or
- // execute any other JavaScript code.
+ // If no exception value is provided, we do not reject the promise. This can
+  // happen when streaming compilation is aborted at a point where script
+  // execution is no longer allowed, e.g. when a browser tab gets refreshed.
+  // happen when streaming compilation is aborted at a point where script
+  // execution is no longer allowed, e.g. when a browser tab gets refreshed.
if (exception.IsEmpty()) return;
Local<Promise::Resolver> resolver = promise.As<Promise::Resolver>();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate_);
i::HandleScope scope(i_isolate);
Local<Context> context = Utils::ToLocal(handle(i_isolate->context()));
- auto maybe = resolver->Reject(context, exception);
+ auto maybe = resolver->Reject(context, exception.ToLocalChecked());
CHECK_IMPLIES(!maybe.FromMaybe(false), i_isolate->has_scheduled_exception());
}
@@ -7696,29 +7631,6 @@ WasmModuleObjectBuilderStreaming::~WasmModuleObjectBuilderStreaming() {
promise_.Reset();
}
-void WasmModuleObjectBuilder::OnBytesReceived(const uint8_t* bytes,
- size_t size) {
- std::unique_ptr<uint8_t[]> cloned_bytes(new uint8_t[size]);
- memcpy(cloned_bytes.get(), bytes, size);
- received_buffers_.push_back(
- Buffer(std::unique_ptr<const uint8_t[]>(
- const_cast<const uint8_t*>(cloned_bytes.release())),
- size));
- total_size_ += size;
-}
-
-MaybeLocal<WasmCompiledModule> WasmModuleObjectBuilder::Finish() {
- std::unique_ptr<uint8_t[]> wire_bytes(new uint8_t[total_size_]);
- uint8_t* insert_at = wire_bytes.get();
-
- for (size_t i = 0; i < received_buffers_.size(); ++i) {
- const Buffer& buff = received_buffers_[i];
- memcpy(insert_at, buff.first.get(), buff.second);
- insert_at += buff.second;
- }
- return WasmCompiledModule::Compile(isolate_, wire_bytes.get(), total_size_);
-}
-
// static
v8::ArrayBuffer::Allocator* v8::ArrayBuffer::Allocator::NewDefaultAllocator() {
return new ArrayBufferAllocator();
@@ -8690,24 +8602,20 @@ void Isolate::RunMicrotasks() {
reinterpret_cast<i::Isolate*>(this)->RunMicrotasks();
}
-
-void Isolate::EnqueueMicrotask(Local<Function> microtask) {
+void Isolate::EnqueueMicrotask(Local<Function> function) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- isolate->EnqueueMicrotask(Utils::OpenHandle(*microtask));
+ i::Handle<i::CallableTask> microtask = isolate->factory()->NewCallableTask(
+ Utils::OpenHandle(*function), isolate->native_context());
+ isolate->EnqueueMicrotask(microtask);
}
-
-void Isolate::EnqueueMicrotask(MicrotaskCallback microtask, void* data) {
+void Isolate::EnqueueMicrotask(MicrotaskCallback callback, void* data) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
i::HandleScope scope(isolate);
- i::Handle<i::CallHandlerInfo> callback_info =
- i::Handle<i::CallHandlerInfo>::cast(
- isolate->factory()->NewStruct(i::TUPLE3_TYPE, i::NOT_TENURED));
- SET_FIELD_WRAPPED(callback_info, set_callback, microtask);
- SET_FIELD_WRAPPED(callback_info, set_js_callback,
- callback_info->redirected_callback());
- SET_FIELD_WRAPPED(callback_info, set_data, data);
- isolate->EnqueueMicrotask(callback_info);
+ i::Handle<i::CallbackTask> microtask = isolate->factory()->NewCallbackTask(
+ isolate->factory()->NewForeign(reinterpret_cast<i::Address>(callback)),
+ isolate->factory()->NewForeign(reinterpret_cast<i::Address>(data)));
+ isolate->EnqueueMicrotask(microtask);
}
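The embedder-facing behaviour is unchanged; both overloads are still exercised the same way, e.g. (sketch, assuming the statements run inside a function with an isolate in scope):

  static void CountMicrotask(void* data) { ++*static_cast<int*>(data); }

  int runs = 0;
  isolate->EnqueueMicrotask(CountMicrotask, &runs);  // wrapped in a CallbackTask
  isolate->RunMicrotasks();                          // runs is now 1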
@@ -8806,6 +8714,12 @@ void Isolate::LowMemoryNotification() {
int Isolate::ContextDisposedNotification(bool dependant_context) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ if (!dependant_context) {
+    // We left the current context, so we can abort all running WebAssembly
+ // compilations.
+ isolate->wasm_engine()->compilation_manager()->AbortAllJobs();
+ }
+ // TODO(ahaas): move other non-heap activity out of the heap call.
return isolate->heap()->NotifyContextDisposed(dependant_context);
}
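From the embedder's point of view the notification itself is unchanged; a call such as the following (isolate in scope assumed) now also aborts all running WebAssembly compilations when false is passed:

  isolate->ContextDisposedNotification(/*dependant_context=*/false);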
@@ -9139,78 +9053,6 @@ Local<StackTrace> Exception::GetStackTrace(Local<Value> exception) {
// --- D e b u g S u p p o r t ---
-bool Debug::SetDebugEventListener(Isolate* isolate, EventCallback that,
- Local<Value> data) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- if (that == nullptr) {
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- i::HandleScope scope(i_isolate);
- i_isolate->debug()->SetDebugDelegate(nullptr, false);
- } else {
- // Might create the Debug context.
- ENTER_V8_FOR_NEW_CONTEXT(i_isolate);
- i::HandleScope scope(i_isolate);
- i::Handle<i::Object> i_data = i_isolate->factory()->undefined_value();
- if (!data.IsEmpty()) i_data = Utils::OpenHandle(*data);
- i::NativeDebugDelegate* delegate =
- new i::NativeDebugDelegate(i_isolate, that, i_data);
- i_isolate->debug()->SetDebugDelegate(delegate, true);
- }
- return true;
-}
-
-void Debug::DebugBreak(Isolate* isolate) { debug::DebugBreak(isolate); }
-
-void Debug::CancelDebugBreak(Isolate* isolate) {
- debug::CancelDebugBreak(isolate);
-}
-
-bool Debug::CheckDebugBreak(Isolate* isolate) {
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- return internal_isolate->stack_guard()->CheckDebugBreak();
-}
-
-void Debug::SetMessageHandler(Isolate* isolate,
- v8::Debug::MessageHandler handler) {}
-
-void Debug::SendCommand(Isolate* isolate, const uint16_t* command, int length,
- ClientData* client_data) {}
-
-MaybeLocal<Value> Debug::Call(Local<Context> context,
- v8::Local<v8::Function> fun,
- v8::Local<v8::Value> data) {
- return debug::Call(context, fun, data);
-}
-
-void Debug::ProcessDebugMessages(Isolate* isolate) {}
-
-Local<Context> Debug::GetDebugContext(Isolate* isolate) {
- return debug::GetDebugContext(isolate);
-}
-
-MaybeLocal<Context> Debug::GetDebuggedContext(Isolate* isolate) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- if (!i_isolate->debug()->in_debug_scope()) return MaybeLocal<Context>();
- i::Handle<i::Object> calling = i_isolate->GetCallingNativeContext();
- if (calling.is_null()) return MaybeLocal<Context>();
- return Utils::ToLocal(i::Handle<i::Context>::cast(calling));
-}
-
-void Debug::SetLiveEditEnabled(Isolate* isolate, bool enable) {
- debug::SetLiveEditEnabled(isolate, enable);
-}
-
-bool Debug::IsTailCallEliminationEnabled(Isolate* isolate) { return false; }
-
-void Debug::SetTailCallEliminationEnabled(Isolate* isolate, bool enabled) {
-}
-
-MaybeLocal<Array> Debug::GetInternalProperties(Isolate* v8_isolate,
- Local<Value> value) {
- return debug::GetInternalProperties(v8_isolate, value);
-}
-
void debug::SetContextId(Local<Context> context, int id) {
Utils::OpenHandle(*context)->set_debug_context_id(i::Smi::FromInt(id));
}
@@ -9653,13 +9495,11 @@ MaybeLocal<UnboundScript> debug::CompileInspectorScript(Isolate* v8_isolate,
ScriptOriginOptions origin_options;
i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
i::Compiler::GetSharedFunctionInfoForScript(
- str, i::MaybeHandle<i::Object>(), 0, 0, origin_options,
- i::MaybeHandle<i::Object>(), isolate->native_context(), nullptr,
+ str, i::Compiler::ScriptDetails(), origin_options, nullptr,
&script_data, ScriptCompiler::kNoCompileOptions,
ScriptCompiler::kNoCacheBecauseInspector,
i::FLAG_expose_inspector_scripts ? i::NOT_NATIVES_CODE
- : i::INSPECTOR_CODE,
- i::MaybeHandle<i::FixedArray>());
+ : i::INSPECTOR_CODE);
has_pending_exception = !maybe_function_info.ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(UnboundScript);
}
@@ -9836,6 +9676,18 @@ v8::Local<debug::GeneratorObject> debug::GeneratorObject::Cast(
return ToApiHandle<debug::GeneratorObject>(Utils::OpenHandle(*value));
}
+MaybeLocal<v8::Value> debug::EvaluateGlobal(v8::Isolate* isolate,
+ v8::Local<v8::String> source) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ PREPARE_FOR_DEBUG_INTERFACE_EXECUTION_WITH_ISOLATE(internal_isolate, Value);
+ Local<Value> result;
+ has_pending_exception = !ToLocal<Value>(
+ i::DebugEvaluate::Global(internal_isolate, Utils::OpenHandle(*source)),
+ &result);
+ RETURN_ON_FAILED_EXECUTION(Value);
+ RETURN_ESCAPED(result);
+}
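A hedged sketch of the new helper as the inspector might use it, assuming an isolate with a current context:

  v8::Local<v8::Value> value;
  if (v8::debug::EvaluateGlobal(
          isolate, v8::String::NewFromUtf8(isolate, "6 * 7",
                                           v8::NewStringType::kNormal)
                       .ToLocalChecked())
          .ToLocal(&value)) {
    // value holds the Number 42; on failure the exception is left pending.
  }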
+
void debug::QueryObjects(v8::Local<v8::Context> v8_context,
QueryObjectPredicate* predicate,
PersistentValueVector<v8::Object>* objects) {
@@ -10464,6 +10316,12 @@ void HeapProfiler::SetGetRetainerInfosCallback(
callback);
}
+void HeapProfiler::SetBuildEmbedderGraphCallback(
+ BuildEmbedderGraphCallback callback) {
+ reinterpret_cast<i::HeapProfiler*>(this)->SetBuildEmbedderGraphCallback(
+ callback);
+}
+
v8::Testing::StressType internal::Testing::stress_type_ =
v8::Testing::kStressTypeOpt;
@@ -10527,7 +10385,7 @@ void Testing::PrepareStressRun(int run) {
void Testing::DeoptimizeAll(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::HandleScope scope(i_isolate);
- internal::Deoptimizer::DeoptimizeAll(i_isolate);
+ i::Deoptimizer::DeoptimizeAll(i_isolate);
}
@@ -10571,14 +10429,15 @@ void HandleScopeImplementer::IterateThis(RootVisitor* v) {
if (last_handle_before_deferred_block_ != nullptr &&
(last_handle_before_deferred_block_ <= &block[kHandleBlockSize]) &&
(last_handle_before_deferred_block_ >= block)) {
- v->VisitRootPointers(Root::kHandleScope, block,
+ v->VisitRootPointers(Root::kHandleScope, nullptr, block,
last_handle_before_deferred_block_);
DCHECK(!found_block_before_deferred);
#ifdef DEBUG
found_block_before_deferred = true;
#endif
} else {
- v->VisitRootPointers(Root::kHandleScope, block, &block[kHandleBlockSize]);
+ v->VisitRootPointers(Root::kHandleScope, nullptr, block,
+ &block[kHandleBlockSize]);
}
}
@@ -10587,7 +10446,7 @@ void HandleScopeImplementer::IterateThis(RootVisitor* v) {
// Iterate over live handles in the last block (if any).
if (!blocks()->empty()) {
- v->VisitRootPointers(Root::kHandleScope, blocks()->back(),
+ v->VisitRootPointers(Root::kHandleScope, nullptr, blocks()->back(),
handle_scope_data_.next);
}
@@ -10596,11 +10455,11 @@ void HandleScopeImplementer::IterateThis(RootVisitor* v) {
for (unsigned i = 0; i < arraysize(context_lists); i++) {
if (context_lists[i]->empty()) continue;
Object** start = reinterpret_cast<Object**>(&context_lists[i]->front());
- v->VisitRootPointers(Root::kHandleScope, start,
+ v->VisitRootPointers(Root::kHandleScope, nullptr, start,
start + context_lists[i]->size());
}
if (microtask_context_) {
- v->VisitRootPointer(Root::kHandleScope,
+ v->VisitRootPointer(Root::kHandleScope, nullptr,
reinterpret_cast<Object**>(&microtask_context_));
}
}
@@ -10670,10 +10529,11 @@ void DeferredHandles::Iterate(RootVisitor* v) {
DCHECK((first_block_limit_ >= blocks_.front()) &&
(first_block_limit_ <= &(blocks_.front())[kHandleBlockSize]));
- v->VisitRootPointers(Root::kHandleScope, blocks_.front(), first_block_limit_);
+ v->VisitRootPointers(Root::kHandleScope, nullptr, blocks_.front(),
+ first_block_limit_);
for (size_t i = 1; i < blocks_.size(); i++) {
- v->VisitRootPointers(Root::kHandleScope, blocks_[i],
+ v->VisitRootPointers(Root::kHandleScope, nullptr, blocks_[i],
&blocks_[i][kHandleBlockSize]);
}
}
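The extra nullptr threaded through these calls is the new description argument on RootVisitor; a sketch of a visitor that tolerates it (interface assumed from the call sites above, with i aliasing v8::internal):

  class CountingRootVisitor : public i::RootVisitor {
   public:
    void VisitRootPointers(i::Root root, const char* description,
                           i::Object** start, i::Object** end) override {
      // description may be nullptr, as for the handle-scope roots above.
      count_ += static_cast<int>(end - start);
    }
    int count() const { return count_; }

   private:
    int count_ = 0;
  };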
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 7bd03c37da..e67f4f7d47 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -180,6 +180,10 @@ class Utils {
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Float64Array> ToLocalFloat64Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<BigInt64Array> ToLocalBigInt64Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<BigUint64Array> ToLocalBigUint64Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<SharedArrayBuffer> ToLocalShared(
v8::internal::Handle<v8::internal::JSArrayBuffer> obj);
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index f420f2e5cb..4d7d9895ce 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -46,7 +46,7 @@
namespace v8 {
namespace internal {
-bool CpuFeatures::SupportsCrankshaft() { return true; }
+bool CpuFeatures::SupportsOptimizer() { return true; }
bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(NEON); }
@@ -109,7 +109,7 @@ void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(target->GetIsolate(), pc_, constant_pool_,
+ Assembler::set_target_address_at(pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
@@ -143,27 +143,27 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
return target_address();
}
-void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
+void RelocInfo::set_target_runtime_entry(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target)
- set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
}
-void RelocInfo::WipeOut(Isolate* isolate) {
+void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_));
if (IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = nullptr;
} else {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr);
+ Assembler::set_target_address_at(pc_, constant_pool_, nullptr);
}
}
template <typename ObjectVisitor>
-void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);
@@ -189,7 +189,7 @@ Operand::Operand(const ExternalReference& f)
value_.immediate = reinterpret_cast<int32_t>(f.address());
}
-Operand::Operand(Smi* value) : rmode_(RelocInfo::NONE32) {
+Operand::Operand(Smi* value) : rmode_(RelocInfo::NONE) {
value_.immediate = reinterpret_cast<intptr_t>(value);
}
@@ -273,15 +273,13 @@ Address Assembler::return_address_from_call_start(Address pc) {
}
}
-
void Assembler::deserialization_set_special_target_at(
- Isolate* isolate, Address constant_pool_entry, Code* code, Address target) {
+ Address constant_pool_entry, Code* code, Address target) {
Memory::Address_at(constant_pool_entry) = target;
}
-
void Assembler::deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
+ Address pc, Address target, RelocInfo::Mode mode) {
Memory::Address_at(pc) = target;
}
@@ -329,17 +327,15 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
}
}
-
-void Assembler::set_target_address_at(Isolate* isolate, Address pc,
- Address constant_pool, Address target,
+void Assembler::set_target_address_at(Address pc, Address constant_pool,
+ Address target,
ICacheFlushMode icache_flush_mode) {
- DCHECK_IMPLIES(isolate == nullptr, icache_flush_mode == SKIP_ICACHE_FLUSH);
if (is_constant_pool_load(pc)) {
// This is a constant pool lookup. Update the entry in the constant pool.
Memory::Address_at(constant_pool_entry_address(pc, constant_pool)) = target;
// Intuitively, we would think it is necessary to always flush the
// instruction cache after patching a target address in the code as follows:
- // Assembler::FlushICache(isolate, pc, sizeof(target));
+ // Assembler::FlushICache(pc, sizeof(target));
// However, on ARM, no instruction is actually patched in the case
// of embedded constants of the form:
// ldr ip, [pp, #...]
@@ -357,7 +353,7 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
DCHECK(IsMovW(Memory::int32_at(pc)));
DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, pc, 2 * kInstrSize);
+ Assembler::FlushICache(pc, 2 * kInstrSize);
}
} else {
// This is an mov / orr immediate load. Patch the immediate embedded in
@@ -377,13 +373,42 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, pc, 4 * kInstrSize);
+ Assembler::FlushICache(pc, 4 * kInstrSize);
}
}
}
EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
+template <typename T>
+bool UseScratchRegisterScope::CanAcquireVfp() const {
+ VfpRegList* available = assembler_->GetScratchVfpRegisterList();
+ DCHECK_NOT_NULL(available);
+ for (int index = 0; index < T::kNumRegisters; index++) {
+ T reg = T::from_code(index);
+ uint64_t mask = reg.ToVfpRegList();
+ if ((*available & mask) == mask) {
+ return true;
+ }
+ }
+ return false;
+}
+
+template <typename T>
+T UseScratchRegisterScope::AcquireVfp() {
+ VfpRegList* available = assembler_->GetScratchVfpRegisterList();
+ DCHECK_NOT_NULL(available);
+ for (int index = 0; index < T::kNumRegisters; index++) {
+ T reg = T::from_code(index);
+ uint64_t mask = reg.ToVfpRegList();
+ if ((*available & mask) == mask) {
+ *available &= ~mask;
+ return reg;
+ }
+ }
+ UNREACHABLE();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index a615d67496..1011db4b80 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -347,22 +347,20 @@ uint32_t RelocInfo::embedded_size() const {
Assembler::target_address_at(pc_, constant_pool_));
}
-void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
+void RelocInfo::set_embedded_address(Address address,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
- flush_mode);
+ Assembler::set_target_address_at(pc_, constant_pool_, address, flush_mode);
}
-void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
- ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_,
+void RelocInfo::set_embedded_size(uint32_t size, ICacheFlushMode flush_mode) {
+ Assembler::set_target_address_at(pc_, constant_pool_,
reinterpret_cast<Address>(size), flush_mode);
}
-void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
+void RelocInfo::set_js_to_wasm_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- set_embedded_address(isolate, address, icache_flush_mode);
+ set_embedded_address(address, icache_flush_mode);
}
Address RelocInfo::js_to_wasm_address() const {
@@ -566,10 +564,16 @@ Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
// it's awkward to use CpuFeatures::VFP32DREGS with CpuFeatureScope. To make
// its use consistent with other features, we always enable it if we can.
EnableCpuFeature(VFP32DREGS);
+ // Make sure we pick two D registers which alias a Q register. This way, we
+ // can use a Q as a scratch if NEON is supported.
+ scratch_vfp_register_list_ = d14.ToVfpRegList() | d15.ToVfpRegList();
+ } else {
+    // When VFP32DREGS is not supported, d15 becomes allocatable. Therefore we
+ // cannot use it as a scratch.
+ scratch_vfp_register_list_ = d14.ToVfpRegList();
}
}
-
Assembler::~Assembler() {
DCHECK_EQ(const_pool_blocked_nesting_, 0);
DCHECK_EQ(code_target_sharing_blocked_nesting_, 0);
@@ -1214,6 +1218,7 @@ void Assembler::AddrMode1(Instr instr, Register rd, Register rn,
DCHECK(x.IsImmediate());
// Upon failure to encode, the opcode should not have changed.
DCHECK(opcode == (instr & kOpCodeMask));
+ UseScratchRegisterScope temps(this);
Condition cond = Instruction::ConditionField(instr);
if ((opcode == MOV) && !set_flags) {
// Generate a sequence of mov instructions or a load from the constant
@@ -1221,7 +1226,7 @@ void Assembler::AddrMode1(Instr instr, Register rd, Register rn,
DCHECK(!rn.is_valid());
Move32BitImmediate(rd, x, cond);
} else if ((opcode == ADD) && !set_flags && (rd == rn) &&
- (scratch_register_list_ == 0)) {
+ !temps.CanAcquire()) {
// Split the operation into a sequence of additions if we cannot use a
// scratch register. In this case, we cannot re-use rn and the assembler
// does not have any scratch registers to spare.
@@ -1244,7 +1249,6 @@ void Assembler::AddrMode1(Instr instr, Register rd, Register rn,
// The immediate operand cannot be encoded as a shifter operand, so load
// it first to a scratch register and change the original instruction to
// use it.
- UseScratchRegisterScope temps(this);
// Re-use the destination register if possible.
Register scratch =
(rd.is_valid() && rd != rn && rd != pc) ? rd : temps.Acquire();
@@ -1501,6 +1505,10 @@ void Assembler::and_(Register dst, Register src1, const Operand& src2,
AddrMode1(cond | AND | s, dst, src1, src2);
}
+void Assembler::and_(Register dst, Register src1, Register src2, SBit s,
+ Condition cond) {
+ and_(dst, src1, Operand(src2), s, cond);
+}
void Assembler::eor(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
@@ -2367,6 +2375,11 @@ void Assembler::isb(BarrierOption option) {
}
}
+void Assembler::csdb() {
+ // Details available in Arm Cache Speculation Side-channels white paper,
+ // version 1.1, page 4.
+ emit(0xE320F014);
+}
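The literal can be cross-checked against the decoder changes further down in this patch with a small standalone sketch (illustrative only):

  #include <cstdint>
  constexpr uint32_t kCsdbEncoding = 0xE320F014;
  static_assert((kCsdbEncoding >> 28) == 0xE, "AL condition");
  static_assert(((kCsdbEncoding >> 8) & 0x1FFFF) == 0x120F0,
                "matches Instruction::IsNopLikeType1()");
  static_assert((kCsdbEncoding & 0xFF) == 20, "hint #20 is csdb; 0 would be nop");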
// Coprocessor instructions.
void Assembler::cdp(Coprocessor coproc,
@@ -5153,8 +5166,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
intptr_t value) {
- DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::CONST_POOL &&
- rmode != RelocInfo::NONE64);
+ DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::CONST_POOL);
bool sharing_ok = RelocInfo::IsNone(rmode) ||
(rmode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE);
DCHECK_LT(pending_32_bit_constants_.size(), kMaxNumPending32Constants);
@@ -5474,24 +5486,24 @@ void PatchingAssembler::Emit(Address addr) {
emit(reinterpret_cast<Instr>(addr));
}
-void PatchingAssembler::FlushICache(Isolate* isolate) {
- Assembler::FlushICache(isolate, buffer_, buffer_size_ - kGap);
-}
-
UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
- : available_(assembler->GetScratchRegisterList()),
- old_available_(*available_) {}
+ : assembler_(assembler),
+ old_available_(*assembler->GetScratchRegisterList()),
+ old_available_vfp_(*assembler->GetScratchVfpRegisterList()) {}
UseScratchRegisterScope::~UseScratchRegisterScope() {
- *available_ = old_available_;
+ *assembler_->GetScratchRegisterList() = old_available_;
+ *assembler_->GetScratchVfpRegisterList() = old_available_vfp_;
}
Register UseScratchRegisterScope::Acquire() {
- DCHECK_NOT_NULL(available_);
- DCHECK_NE(*available_, 0);
- int index = static_cast<int>(base::bits::CountTrailingZeros32(*available_));
- *available_ &= ~(1UL << index);
- return Register::from_code(index);
+ RegList* available = assembler_->GetScratchRegisterList();
+ DCHECK_NOT_NULL(available);
+ DCHECK_NE(*available, 0);
+ int index = static_cast<int>(base::bits::CountTrailingZeros32(*available));
+ Register reg = Register::from_code(index);
+ *available &= ~reg.bit();
+ return reg;
}
} // namespace internal
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 8b95aad886..32baa0ae8d 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -56,8 +56,9 @@ namespace internal {
V(r0) V(r1) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
V(r8) V(r9) V(r10) V(fp) V(ip) V(sp) V(lr) V(pc)
-#define ALLOCATABLE_GENERAL_REGISTERS(V) \
- V(r0) V(r1) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) V(r8)
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(r0) V(r1) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
+ V(r8) V(r9)
#define FLOAT_REGISTERS(V) \
V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) \
@@ -184,6 +185,17 @@ enum SwVfpRegisterCode {
kSwVfpAfterLast
};
+// Representation of a list of non-overlapping VFP registers. This list
+// represents the data layout of VFP registers as a bitfield:
+// S registers cover 1 bit
+// D registers cover 2 bits
+// Q registers cover 4 bits
+//
+// This way, we make sure no registers in the list ever overlap. However, a list
+// may represent multiple different sets of registers,
+// e.g. [d0 s2 s3] <=> [s0 s1 d1].
+typedef uint64_t VfpRegList;
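The aliasing scheme described here can be illustrated with a standalone sketch that mirrors the ToVfpRegList() encodings added below (assuming the same register numbering as V8's S/D/Q register classes):

  #include <cstdint>
  using VfpRegList = uint64_t;
  constexpr VfpRegList SMask(int code) { return uint64_t{0x1} << code; }
  constexpr VfpRegList DMask(int code) { return uint64_t{0x3} << (code * 2); }
  constexpr VfpRegList QMask(int code) { return uint64_t{0xf} << (code * 4); }
  static_assert(DMask(0) == (SMask(0) | SMask(1)), "d0 aliases s0 and s1");
  static_assert(QMask(7) == (DMask(14) | DMask(15)), "q7 aliases d14 and d15");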
+
// Single word VFP register.
class SwVfpRegister : public RegisterBase<SwVfpRegister, kSwVfpAfterLast> {
public:
@@ -195,6 +207,11 @@ class SwVfpRegister : public RegisterBase<SwVfpRegister, kSwVfpAfterLast> {
*vm = reg_code >> 1;
}
void split_code(int* vm, int* m) const { split_code(code(), vm, m); }
+ VfpRegList ToVfpRegList() const {
+ DCHECK(is_valid());
+ // Each bit in the list corresponds to a S register.
+ return uint64_t{0x1} << code();
+ }
private:
friend class RegisterBase;
@@ -217,10 +234,6 @@ enum DoubleRegisterCode {
// Double word VFP register.
class DwVfpRegister : public RegisterBase<DwVfpRegister, kDoubleAfterLast> {
public:
- // A few double registers are reserved: one as a scratch register and one to
- // hold 0.0, that does not fit in the immediate field of vmov instructions.
- // d14: 0.0
- // d15: scratch register.
static constexpr int kSizeInBytes = 8;
inline static int NumRegisters();
@@ -231,6 +244,11 @@ class DwVfpRegister : public RegisterBase<DwVfpRegister, kDoubleAfterLast> {
*vm = reg_code & 0x0F;
}
void split_code(int* vm, int* m) const { split_code(code(), vm, m); }
+ VfpRegList ToVfpRegList() const {
+ DCHECK(is_valid());
+ // A D register overlaps two S registers.
+ return uint64_t{0x3} << (code() * 2);
+ }
private:
friend class RegisterBase;
@@ -255,6 +273,11 @@ class LowDwVfpRegister
SwVfpRegister high() const {
return SwVfpRegister::from_code(code() * 2 + 1);
}
+ VfpRegList ToVfpRegList() const {
+ DCHECK(is_valid());
+ // A D register overlaps two S registers.
+ return uint64_t{0x3} << (code() * 2);
+ }
private:
friend class RegisterBase;
@@ -282,6 +305,11 @@ class QwNeonRegister : public RegisterBase<QwNeonRegister, kSimd128AfterLast> {
DwVfpRegister high() const {
return DwVfpRegister::from_code(code() * 2 + 1);
}
+ VfpRegList ToVfpRegList() const {
+ DCHECK(is_valid());
+ // A Q register overlaps four S registers.
+ return uint64_t{0xf} << (code() * 4);
+ }
private:
friend class RegisterBase;
@@ -334,12 +362,6 @@ SIMD128_REGISTERS(DECLARE_SIMD128_REGISTER)
constexpr LowDwVfpRegister kFirstCalleeSavedDoubleReg = d8;
constexpr LowDwVfpRegister kLastCalleeSavedDoubleReg = d15;
constexpr LowDwVfpRegister kDoubleRegZero = d13;
-constexpr LowDwVfpRegister kScratchDoubleReg = d14;
-// This scratch q-register aliases d14 (kScratchDoubleReg) and d15, but is only
-// used if NEON is supported, which implies VFP32DREGS. When there are only 16
-// d-registers, d15 is still allocatable.
-constexpr QwNeonRegister kScratchQuadReg = q7;
-constexpr LowDwVfpRegister kScratchDoubleReg2 = d15;
constexpr CRegister no_creg = CRegister::no_reg();
@@ -376,7 +398,7 @@ class Operand BASE_EMBEDDED {
public:
// immediate
INLINE(explicit Operand(int32_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE32));
+ RelocInfo::Mode rmode = RelocInfo::NONE));
INLINE(static Operand Zero());
INLINE(explicit Operand(const ExternalReference& f));
explicit Operand(Handle<HeapObject> handle);
@@ -651,7 +673,7 @@ class Assembler : public AssemblerBase {
// The isolate argument is unused (and may be nullptr) when skipping flushing.
INLINE(static Address target_address_at(Address pc, Address constant_pool));
INLINE(static void set_target_address_at(
- Isolate* isolate, Address pc, Address constant_pool, Address target,
+ Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
// Return the code target address at a call site from the return address
@@ -665,12 +687,11 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Isolate* isolate, Address constant_pool_entry, Code* code,
- Address target);
+ Address constant_pool_entry, Code* code, Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target,
+ Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// Here we are patching the address in the constant pool, not the actual call
@@ -685,6 +706,9 @@ class Assembler : public AssemblerBase {
// register.
static constexpr int kPcLoadDelta = 8;
RegList* GetScratchRegisterList() { return &scratch_register_list_; }
+ VfpRegList* GetScratchVfpRegisterList() {
+ return &scratch_vfp_register_list_;
+ }
// ---------------------------------------------------------------------------
// Code generation
@@ -717,6 +741,8 @@ class Assembler : public AssemblerBase {
void and_(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
+ void and_(Register dst, Register src1, Register src2, SBit s = LeaveCC,
+ Condition cond = al);
void eor(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
@@ -936,6 +962,9 @@ class Assembler : public AssemblerBase {
void dsb(BarrierOption option);
void isb(BarrierOption option);
+ // Conditional speculation barrier.
+ void csdb();
+
// Coprocessor instructions
void cdp(Coprocessor coproc, int opcode_1,
@@ -1655,6 +1684,7 @@ class Assembler : public AssemblerBase {
// Scratch registers available for use by the Assembler.
RegList scratch_register_list_;
+ VfpRegList scratch_vfp_register_list_;
private:
// Avoid overflows for displacements etc.
@@ -1732,6 +1762,7 @@ class Assembler : public AssemblerBase {
friend class BlockConstPoolScope;
friend class BlockCodeTargetSharingScope;
friend class EnsureSpace;
+ friend class UseScratchRegisterScope;
// The following functions help with avoiding allocations of embedded heap
// objects during the code assembly phase. {RequestHeapObject} records the
@@ -1747,8 +1778,6 @@ class Assembler : public AssemblerBase {
std::forward_list<HeapObjectRequest> heap_object_requests_;
};
-constexpr int kNoCodeAgeSequenceLength = 3 * Assembler::kInstrSize;
-
class EnsureSpace BASE_EMBEDDED {
public:
INLINE(explicit EnsureSpace(Assembler* assembler));
@@ -1760,7 +1789,6 @@ class PatchingAssembler : public Assembler {
~PatchingAssembler();
void Emit(Address addr);
- void FlushICache(Isolate* isolate);
};
// This scope utility allows scratch registers to be managed safely. The
@@ -1779,12 +1807,38 @@ class UseScratchRegisterScope {
// Take a register from the list and return it.
Register Acquire();
+ SwVfpRegister AcquireS() { return AcquireVfp<SwVfpRegister>(); }
+ LowDwVfpRegister AcquireLowD() { return AcquireVfp<LowDwVfpRegister>(); }
+ DwVfpRegister AcquireD() {
+ DwVfpRegister reg = AcquireVfp<DwVfpRegister>();
+ DCHECK(assembler_->VfpRegisterIsAvailable(reg));
+ return reg;
+ }
+ QwNeonRegister AcquireQ() {
+ QwNeonRegister reg = AcquireVfp<QwNeonRegister>();
+ DCHECK(assembler_->VfpRegisterIsAvailable(reg));
+ return reg;
+ }
private:
- // Currently available scratch registers.
- RegList* available_;
+ friend class Assembler;
+ friend class TurboAssembler;
+
+ // Check if we have registers available to acquire.
+ // These methods are kept private intentionally to restrict their usage to the
+  // assemblers. Choosing to emit a different instruction sequence depending on
+ // the availability of scratch registers is generally their job.
+ bool CanAcquire() const { return *assembler_->GetScratchRegisterList() != 0; }
+ template <typename T>
+ bool CanAcquireVfp() const;
+
+ template <typename T>
+ T AcquireVfp();
+
+ Assembler* assembler_;
// Available scratch registers at the start of this scope.
RegList old_available_;
+ VfpRegList old_available_vfp_;
};
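A hedged usage sketch of the extended scope inside code-generation helpers, assuming a MacroAssembler* masm; the exact registers handed out depend on the scratch lists set up in the Assembler constructor (d14/d15 by default):

  {
    UseScratchRegisterScope temps(masm);
    DwVfpRegister dtmp = temps.AcquireD();  // taken from the VFP scratch list
    masm->vmov(dtmp, d0);                   // stash d0 while shuffling values
    masm->vmov(d0, d1);
    masm->vmov(d1, dtmp);
  }  // dtmp is handed back to the scratch list here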
} // namespace internal
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index ee706c7656..2695bafc1b 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -46,7 +46,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
UseScratchRegisterScope temps(masm);
Register double_low = GetRegisterThatIsNotOneOf(result_reg);
Register double_high = GetRegisterThatIsNotOneOf(result_reg, double_low);
- LowDwVfpRegister double_scratch = kScratchDoubleReg;
+ LowDwVfpRegister double_scratch = temps.AcquireLowD();
// Save the old values from these temporary registers on the stack.
__ Push(double_high, double_low);
@@ -385,6 +385,12 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ cmp(cp, Operand(0));
__ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
+  // Reset the masking register. This is done independently of the underlying
+ // feature flag {FLAG_branch_load_poisoning} to make the snapshot work with
+ // both configurations. It is safe to always do this, because the underlying
+ // register is caller-saved and can be arbitrarily clobbered.
+ __ ResetSpeculationPoisonRegister();
+
// Compute the handler entry address and jump to it.
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
__ mov(r1, Operand(pending_handler_entrypoint_address));
@@ -572,8 +578,8 @@ void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
Zone* zone) {
if (tasm->isolate()->function_entry_hook() != nullptr) {
tasm->MaybeCheckConstPool();
- PredictableCodeSizeScope predictable(tasm);
- predictable.ExpectSize(tasm->CallStubSize() + 2 * Assembler::kInstrSize);
+ PredictableCodeSizeScope predictable(
+ tasm, tasm->CallStubSize() + 2 * Assembler::kInstrSize);
tasm->push(lr);
tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
tasm->pop(lr);
@@ -584,8 +590,8 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != nullptr) {
ProfileEntryHookStub stub(masm->isolate());
masm->MaybeCheckConstPool();
- PredictableCodeSizeScope predictable(masm);
- predictable.ExpectSize(masm->CallStubSize() + 2 * Assembler::kInstrSize);
+ PredictableCodeSizeScope predictable(
+ masm, masm->CallStubSize() + 2 * Assembler::kInstrSize);
__ push(lr);
__ CallStub(&stub);
__ pop(lr);
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 9fb2eb4e8d..b3e880e048 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -166,9 +166,9 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
+ DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICache(isolate, buffer, allocated);
+ Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
@@ -257,7 +257,7 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Assembler::FlushICache(isolate, buffer, allocated);
+ Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
#endif
@@ -282,9 +282,9 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
+ DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICache(isolate, buffer, allocated);
+ Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 1c865afb09..4e52a91738 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -641,8 +641,8 @@ class Instruction {
&& (Bit(20) == 0)
&& ((Bit(7) == 0)); }
- // Test for a nop instruction, which falls under type 1.
- inline bool IsNopType1() const { return Bits(24, 0) == 0x0120F000; }
+ // Test for nop-like instructions which fall under type 1.
+ inline bool IsNopLikeType1() const { return Bits(24, 8) == 0x120F0; }
// Test for a stop instruction.
inline bool IsStop() const {
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 9a21ef862c..a4a540512d 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -30,9 +30,6 @@ void Deoptimizer::TableEntryGenerator::Generate() {
const int kFloatRegsSize = kFloatSize * SwVfpRegister::kNumRegisters;
// Save all allocatable VFP registers before messing with them.
- DCHECK_EQ(kDoubleRegZero.code(), 13);
- DCHECK_EQ(kScratchDoubleReg.code(), 14);
-
{
// We use a run-time check for VFP32DREGS.
CpuFeatureScope scope(masm(), VFP32DREGS,
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 9951136561..9459a7e60d 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -937,8 +937,14 @@ void Decoder::DecodeType01(Instruction* instr) {
} else {
Unknown(instr); // not used by V8
}
- } else if ((type == 1) && instr->IsNopType1()) {
- Format(instr, "nop'cond");
+ } else if ((type == 1) && instr->IsNopLikeType1()) {
+ if (instr->BitField(7, 0) == 0) {
+ Format(instr, "nop'cond");
+ } else if (instr->BitField(7, 0) == 20) {
+ Format(instr, "csdb");
+ } else {
+ Unknown(instr); // Not used in V8.
+ }
} else {
switch (instr->OpcodeField()) {
case AND: {
diff --git a/deps/v8/src/arm/frame-constants-arm.h b/deps/v8/src/arm/frame-constants-arm.h
index 9307cc22de..1230a26956 100644
--- a/deps/v8/src/arm/frame-constants-arm.h
+++ b/deps/v8/src/arm/frame-constants-arm.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM_FRAMES_ARM_H_
-#define V8_ARM_FRAMES_ARM_H_
+#ifndef V8_ARM_FRAME_CONSTANTS_ARM_H_
+#define V8_ARM_FRAME_CONSTANTS_ARM_H_
namespace v8 {
namespace internal {
@@ -45,4 +45,4 @@ class JavaScriptFrameConstants : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_ARM_FRAMES_ARM_H_
+#endif // V8_ARM_FRAME_CONSTANTS_ARM_H_
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc
index 6b7498fde5..20ecef6c1c 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/arm/interface-descriptors-arm.cc
@@ -70,12 +70,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
-void FastNewClosureDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r1, r2, r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return r0; }
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 30190d3f34..3a96b640a2 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -18,6 +18,7 @@
#include "src/double.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
+#include "src/instruction-stream.h"
#include "src/objects-inl.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
@@ -241,22 +242,6 @@ void TurboAssembler::Ret(int drop, Condition cond) {
Ret(cond);
}
-
-void MacroAssembler::Swap(Register reg1,
- Register reg2,
- Register scratch,
- Condition cond) {
- if (scratch == no_reg) {
- eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
- eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
- eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
- } else {
- mov(scratch, reg1, LeaveCC, cond);
- mov(reg1, reg2, LeaveCC, cond);
- mov(reg2, scratch, LeaveCC, cond);
- }
-}
-
void TurboAssembler::Call(Label* target) { bl(target); }
void TurboAssembler::Push(Handle<HeapObject> handle) {
@@ -305,27 +290,34 @@ void TurboAssembler::Move(QwNeonRegister dst, QwNeonRegister src) {
}
}
-void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
- if (srcdst0 == srcdst1) return; // Swapping aliased registers emits nothing.
+void TurboAssembler::Swap(Register srcdst0, Register srcdst1) {
+ DCHECK(srcdst0 != srcdst1);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ mov(scratch, srcdst0);
+ mov(srcdst0, srcdst1);
+ mov(srcdst1, scratch);
+}
+void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
+ DCHECK(srcdst0 != srcdst1);
DCHECK(VfpRegisterIsAvailable(srcdst0));
DCHECK(VfpRegisterIsAvailable(srcdst1));
if (CpuFeatures::IsSupported(NEON)) {
vswp(srcdst0, srcdst1);
} else {
- DCHECK_NE(srcdst0, kScratchDoubleReg);
- DCHECK_NE(srcdst1, kScratchDoubleReg);
- vmov(kScratchDoubleReg, srcdst0);
+ UseScratchRegisterScope temps(this);
+ DwVfpRegister scratch = temps.AcquireD();
+ vmov(scratch, srcdst0);
vmov(srcdst0, srcdst1);
- vmov(srcdst1, kScratchDoubleReg);
+ vmov(srcdst1, scratch);
}
}
void TurboAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) {
- if (srcdst0 != srcdst1) {
- vswp(srcdst0, srcdst1);
- }
+ DCHECK(srcdst0 != srcdst1);
+ vswp(srcdst0, srcdst1);
}
void MacroAssembler::Mls(Register dst, Register src1, Register src2,
@@ -817,11 +809,14 @@ void TurboAssembler::VmovExtended(int dst_code, int src_code) {
int dst_offset = dst_code & 1;
int src_offset = src_code & 1;
if (CpuFeatures::IsSupported(NEON)) {
+ UseScratchRegisterScope temps(this);
+ DwVfpRegister scratch = temps.AcquireD();
// On Neon we can shift and insert from d-registers.
if (src_offset == dst_offset) {
// Offsets are the same, use vdup to copy the source to the opposite lane.
- vdup(Neon32, kScratchDoubleReg, src_d_reg, src_offset);
- src_d_reg = kScratchDoubleReg;
+ vdup(Neon32, scratch, src_d_reg, src_offset);
+ // Here we are extending the lifetime of scratch.
+ src_d_reg = scratch;
src_offset = dst_offset ^ 1;
}
if (dst_offset) {
@@ -842,27 +837,30 @@ void TurboAssembler::VmovExtended(int dst_code, int src_code) {
// Without Neon, use the scratch registers to move src and/or dst into
// s-registers.
- int scratchSCode = kScratchDoubleReg.low().code();
- int scratchSCode2 = kScratchDoubleReg2.low().code();
+ UseScratchRegisterScope temps(this);
+ LowDwVfpRegister d_scratch = temps.AcquireLowD();
+ LowDwVfpRegister d_scratch2 = temps.AcquireLowD();
+ int s_scratch_code = d_scratch.low().code();
+ int s_scratch_code2 = d_scratch2.low().code();
if (src_code < SwVfpRegister::kNumRegisters) {
// src is an s-register, dst is not.
- vmov(kScratchDoubleReg, dst_d_reg);
- vmov(SwVfpRegister::from_code(scratchSCode + dst_offset),
+ vmov(d_scratch, dst_d_reg);
+ vmov(SwVfpRegister::from_code(s_scratch_code + dst_offset),
SwVfpRegister::from_code(src_code));
- vmov(dst_d_reg, kScratchDoubleReg);
+ vmov(dst_d_reg, d_scratch);
} else if (dst_code < SwVfpRegister::kNumRegisters) {
// dst is an s-register, src is not.
- vmov(kScratchDoubleReg, src_d_reg);
+ vmov(d_scratch, src_d_reg);
vmov(SwVfpRegister::from_code(dst_code),
- SwVfpRegister::from_code(scratchSCode + src_offset));
+ SwVfpRegister::from_code(s_scratch_code + src_offset));
} else {
// Neither src nor dst is an s-register. Both scratch double registers are
// available when there are 32 VFP registers.
- vmov(kScratchDoubleReg, src_d_reg);
- vmov(kScratchDoubleReg2, dst_d_reg);
- vmov(SwVfpRegister::from_code(scratchSCode + dst_offset),
- SwVfpRegister::from_code(scratchSCode2 + src_offset));
- vmov(dst_d_reg, kScratchQuadReg.high());
+ vmov(d_scratch, src_d_reg);
+ vmov(d_scratch2, dst_d_reg);
+ vmov(SwVfpRegister::from_code(s_scratch_code + dst_offset),
+ SwVfpRegister::from_code(s_scratch_code2 + src_offset));
+ vmov(dst_d_reg, d_scratch2);
}
}
@@ -870,11 +868,13 @@ void TurboAssembler::VmovExtended(int dst_code, const MemOperand& src) {
if (dst_code < SwVfpRegister::kNumRegisters) {
vldr(SwVfpRegister::from_code(dst_code), src);
} else {
+ UseScratchRegisterScope temps(this);
+ LowDwVfpRegister scratch = temps.AcquireLowD();
// TODO(bbudge) If Neon supported, use load single lane form of vld1.
- int dst_s_code = kScratchDoubleReg.low().code() + (dst_code & 1);
- vmov(kScratchDoubleReg, DwVfpRegister::from_code(dst_code / 2));
+ int dst_s_code = scratch.low().code() + (dst_code & 1);
+ vmov(scratch, DwVfpRegister::from_code(dst_code / 2));
vldr(SwVfpRegister::from_code(dst_s_code), src);
- vmov(DwVfpRegister::from_code(dst_code / 2), kScratchDoubleReg);
+ vmov(DwVfpRegister::from_code(dst_code / 2), scratch);
}
}
@@ -883,8 +883,10 @@ void TurboAssembler::VmovExtended(const MemOperand& dst, int src_code) {
vstr(SwVfpRegister::from_code(src_code), dst);
} else {
// TODO(bbudge) If Neon supported, use store single lane form of vst1.
- int src_s_code = kScratchDoubleReg.low().code() + (src_code & 1);
- vmov(kScratchDoubleReg, DwVfpRegister::from_code(src_code / 2));
+ UseScratchRegisterScope temps(this);
+ LowDwVfpRegister scratch = temps.AcquireLowD();
+ int src_s_code = scratch.low().code() + (src_code & 1);
+ vmov(scratch, DwVfpRegister::from_code(src_code / 2));
vstr(SwVfpRegister::from_code(src_s_code), dst);
}
}
@@ -938,9 +940,11 @@ void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
void TurboAssembler::LslPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
- Register scratch, Register shift) {
+ Register shift) {
DCHECK(!AreAliased(dst_high, src_low));
DCHECK(!AreAliased(dst_high, shift));
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
Label less_than_32;
Label done;
@@ -984,9 +988,11 @@ void TurboAssembler::LslPair(Register dst_low, Register dst_high,
void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
- Register scratch, Register shift) {
+ Register shift) {
DCHECK(!AreAliased(dst_low, src_high));
DCHECK(!AreAliased(dst_low, shift));
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
Label less_than_32;
Label done;
@@ -1031,9 +1037,11 @@ void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
void TurboAssembler::AsrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
- Register scratch, Register shift) {
+ Register shift) {
DCHECK(!AreAliased(dst_low, src_high));
DCHECK(!AreAliased(dst_low, shift));
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
Label less_than_32;
Label done;
@@ -1362,13 +1370,30 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
- Label skip_hook;
+ Label skip_hook, call_hook;
+
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(isolate());
+ mov(r4, Operand(debug_is_active));
+ ldrsb(r4, MemOperand(r4));
+ cmp(r4, Operand(0));
+ b(eq, &skip_hook);
+
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
mov(r4, Operand(debug_hook_active));
ldrsb(r4, MemOperand(r4));
cmp(r4, Operand(0));
+ b(ne, &call_hook);
+
+ ldr(r4, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
+ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kDebugInfoOffset));
+ JumpIfSmi(r4, &skip_hook);
+ ldr(r4, FieldMemOperand(r4, DebugInfo::kFlagsOffset));
+ tst(r4, Operand(Smi::FromInt(DebugInfo::kBreakAtEntry)));
b(eq, &skip_hook);
+
+ bind(&call_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -1426,7 +1451,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
- Register code = r4;
+ Register code = kJavaScriptCallCodeStartRegister;
ldr(code, FieldMemOperand(function, JSFunction::kCodeOffset));
add(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
@@ -1480,14 +1505,6 @@ void MacroAssembler::InvokeFunction(Register function,
InvokeFunctionCode(r1, no_reg, expected, actual, flag);
}
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag) {
- Move(r1, function);
- InvokeFunction(r1, expected, actual, flag);
-}
-
void MacroAssembler::MaybeDropFrames() {
// Check whether we need to drop frames to restart a function on the stack.
ExternalReference restart_fp =
@@ -1615,13 +1632,22 @@ void MacroAssembler::TryDoubleToInt32Exact(Register result,
void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
DwVfpRegister double_input,
Label* done) {
- LowDwVfpRegister double_scratch = kScratchDoubleReg;
- vcvt_s32_f64(double_scratch.low(), double_input);
- vmov(result, double_scratch.low());
-
UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
+ SwVfpRegister single_scratch = SwVfpRegister::no_reg();
+ if (temps.CanAcquireVfp<SwVfpRegister>()) {
+ single_scratch = temps.AcquireS();
+ } else {
+ // Re-use the input as a scratch register. However, we can only do this if
+ // the input register is d0-d15 as there are no s32+ registers.
+ DCHECK_LT(double_input.code(), LowDwVfpRegister::kNumRegisters);
+ LowDwVfpRegister double_scratch =
+ LowDwVfpRegister::from_code(double_input.code());
+ single_scratch = double_scratch.low();
+ }
+ vcvt_s32_f64(single_scratch, double_input);
+ vmov(result, single_scratch);
+ Register scratch = temps.Acquire();
// If result is not saturated (0x7FFFFFFF or 0x80000000), we are done.
sub(scratch, result, Operand(1));
cmp(scratch, Operand(0x7FFFFFFE));
@@ -1704,6 +1730,12 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
+void MacroAssembler::JumpToInstructionStream(const InstructionStream* stream) {
+ int32_t bytes_address = reinterpret_cast<int32_t>(stream->bytes());
+ mov(kOffHeapTrampolineRegister, Operand(bytes_address, RelocInfo::NONE));
+ Jump(kOffHeapTrampolineRegister);
+}
+
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK_GT(value, 0);
@@ -2275,6 +2307,15 @@ bool AreAliased(Register reg1,
}
#endif
+void TurboAssembler::ComputeCodeStartAddress(Register dst) {
+ // We can use the register pc - 8 for the address of the current instruction.
+ sub(dst, pc, Operand(pc_offset() + TurboAssembler::kPcLoadDelta));
+}
+
+void TurboAssembler::ResetSpeculationPoisonRegister() {
+ mov(kSpeculationPoisonRegister, Operand(-1));
+}
+
} // namespace internal
} // namespace v8
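Throughout this file the patch replaces fixed scratch registers (kScratchDoubleReg, kScratchDoubleReg2) and caller-supplied scratch parameters with temporaries handed out by UseScratchRegisterScope. A minimal sketch of that pattern, assuming a hypothetical emitter body and only the Acquire()/AcquireD() accessors used in the hunks above (the register names are illustrative):

  {
    UseScratchRegisterScope temps(this);   // borrow from the assembler's scratch pool
    Register scratch = temps.Acquire();    // usually ip on this port
    mov(scratch, dst);                     // three-move swap through the temp
    mov(dst, src);
    mov(src, scratch);
  }  // 'scratch' is released here, so callers never have to pass one in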
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index cf731cbedb..50ce6dc005 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -20,12 +20,15 @@ constexpr Register kReturnRegister2 = r2;
constexpr Register kJSFunctionRegister = r1;
constexpr Register kContextRegister = r7;
constexpr Register kAllocateSizeRegister = r1;
+constexpr Register kSpeculationPoisonRegister = r9;
constexpr Register kInterpreterAccumulatorRegister = r0;
constexpr Register kInterpreterBytecodeOffsetRegister = r5;
constexpr Register kInterpreterBytecodeArrayRegister = r6;
constexpr Register kInterpreterDispatchTableRegister = r8;
constexpr Register kJavaScriptCallArgCountRegister = r0;
+constexpr Register kJavaScriptCallCodeStartRegister = r2;
constexpr Register kJavaScriptCallNewTargetRegister = r3;
+constexpr Register kOffHeapTrampolineRegister = r4;
constexpr Register kRuntimeCallFunctionRegister = r1;
constexpr Register kRuntimeCallArgCountRegister = r0;
@@ -305,15 +308,15 @@ class TurboAssembler : public Assembler {
inline bool AllowThisStubCall(CodeStub* stub);
void LslPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, Register scratch, Register shift);
+ Register src_high, Register shift);
void LslPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, uint32_t shift);
void LsrPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, Register scratch, Register shift);
+ Register src_high, Register shift);
void LsrPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, uint32_t shift);
void AsrPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, Register scratch, Register shift);
+ Register src_high, Register shift);
void AsrPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, uint32_t shift);
@@ -481,7 +484,8 @@ class TurboAssembler : public Assembler {
void VmovExtended(int dst_code, const MemOperand& src);
void VmovExtended(const MemOperand& dst, int src_code);
- // Register swap.
+ // Register swap. Note that the register operands should be distinct.
+ void Swap(Register srcdst0, Register srcdst1);
void Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1);
void Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1);
@@ -530,6 +534,12 @@ class TurboAssembler : public Assembler {
#endif
}
+ // Compute the start of the generated instruction stream from the current PC.
+ // This is an alternative to embedding the {CodeObject} handle as a reference.
+ void ComputeCodeStartAddress(Register dst);
+
+ void ResetSpeculationPoisonRegister();
+
private:
bool has_frame_ = false;
Isolate* const isolate_;
@@ -579,11 +589,6 @@ class MacroAssembler : public TurboAssembler {
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object);
- // Swap two registers. If the scratch register is omitted then a slightly
- // less efficient form using xor instead of mov is emitted.
- void Swap(Register reg1, Register reg2, Register scratch = no_reg,
- Condition cond = al);
-
void Mls(Register dst, Register src1, Register src2, Register srcA,
Condition cond = al);
void And(Register dst, Register src1, const Operand& src2,
@@ -694,10 +699,6 @@ class MacroAssembler : public TurboAssembler {
void InvokeFunction(Register function, const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
- void InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag);
-
// Frame restart support
void MaybeDropFrames();
@@ -797,6 +798,9 @@ class MacroAssembler : public TurboAssembler {
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
+ // Generates a trampoline to jump to the off-heap instruction stream.
+ void JumpToInstructionStream(const InstructionStream* stream);
+
// ---------------------------------------------------------------------------
// StatsCounter support
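The ComputeCodeStartAddress declaration above relies on the ARM convention that reading pc yields the address of the current instruction plus 8 bytes (TurboAssembler::kPcLoadDelta). A worked example of the subtraction, assuming the sub happens to be emitted when pc_offset() is 0x40:

  // sub(dst, pc, Operand(pc_offset() + kPcLoadDelta))
  //
  //   value read from pc at run time  = code_start + 0x40 + 8
  //   immediate operand               =              0x40 + 8
  //   dst = pc - operand              = code_start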
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 52fe902237..6a735fcef6 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -547,8 +547,7 @@ void ArmDebugger::Debug() {
#undef XSTR
}
-
-static bool ICacheMatch(void* one, void* two) {
+bool Simulator::ICacheMatch(void* one, void* two) {
DCHECK_EQ(reinterpret_cast<intptr_t>(one) & CachePage::kPageMask, 0);
DCHECK_EQ(reinterpret_cast<intptr_t>(two) & CachePage::kPageMask, 0);
return one == two;
@@ -645,11 +644,6 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
- i_cache_ = isolate_->simulator_i_cache();
- if (i_cache_ == nullptr) {
- i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
- isolate_->set_simulator_i_cache(i_cache_);
- }
// Set up simulator support first. Some of this information is needed to
// setup the architecture state.
size_t stack_size = 1 * 1024*1024; // allocate 1MB for stack
@@ -2314,8 +2308,15 @@ void Simulator::DecodeType01(Instruction* instr) {
PrintF("%08x\n", instr->InstructionBits());
UNIMPLEMENTED();
}
- } else if ((type == 1) && instr->IsNopType1()) {
- // NOP.
+ } else if ((type == 1) && instr->IsNopLikeType1()) {
+ if (instr->BitField(7, 0) == 0) {
+ // NOP.
+ } else if (instr->BitField(7, 0) == 20) {
+ // CSDB.
+ } else {
+ PrintF("%08x\n", instr->InstructionBits());
+ UNIMPLEMENTED();
+ }
} else {
int rd = instr->RdValue();
int rn = instr->RnValue();
@@ -5640,7 +5641,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
// Executes the current instruction.
void Simulator::InstructionDecode(Instruction* instr) {
if (v8::internal::FLAG_check_icache) {
- CheckICache(isolate_->simulator_i_cache(), instr);
+ CheckICache(i_cache(), instr);
}
pc_modified_ = false;
if (::v8::internal::FLAG_trace_sim) {
@@ -5822,7 +5823,7 @@ intptr_t Simulator::CallImpl(byte* entry, int argument_count,
return get_register(r0);
}
-int32_t Simulator::CallFPImpl(byte* entry, double d0, double d1) {
+intptr_t Simulator::CallFPImpl(byte* entry, double d0, double d1) {
if (use_eabi_hardfloat()) {
set_d_register_from_double(0, d0);
set_d_register_from_double(1, d1);
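The simulator now routes the type-1 "NOP-like" encodings through a single check: hint #0 is NOP, hint #20 is CSDB, and anything else is unimplemented. A compact sketch of that dispatch, mirroring the BitField test added above (illustrative only, not part of the patch):

  void DecodeNopLikeHint(Instruction* instr) {
    switch (instr->BitField(7, 0)) {
      case 0:   break;  // NOP: no architectural effect.
      case 20:  break;  // CSDB: barrier against speculative data flow.
      default:
        PrintF("%08x\n", instr->InstructionBits());
        UNIMPLEMENTED();
    }
  }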
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 1cb11ffd96..46a84ff4b4 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -183,6 +183,7 @@ class Simulator : public SimulatorBase {
static void SetRedirectInstruction(Instruction* instruction);
// ICache checking.
+ static bool ICacheMatch(void* one, void* two);
static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
size_t size);
@@ -381,9 +382,6 @@ class Simulator : public SimulatorBase {
// Debugger input.
char* last_debugger_input_;
- // Icache simulation
- base::CustomMatcherHashMap* i_cache_;
-
// Registered breakpoints.
Instruction* break_pc_;
Instr break_instr_;
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index 11c4bbf33f..0c31400d9c 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -13,8 +13,7 @@
namespace v8 {
namespace internal {
-
-bool CpuFeatures::SupportsCrankshaft() { return true; }
+bool CpuFeatures::SupportsOptimizer() { return true; }
bool CpuFeatures::SupportsWasmSimd128() { return true; }
@@ -95,7 +94,7 @@ inline void CPURegList::Remove(int code) {
inline Register Register::XRegFromCode(unsigned code) {
if (code == kSPRegInternalCode) {
- return csp;
+ return sp;
} else {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
return Register::Create(code, kXRegSizeInBits);
@@ -105,7 +104,7 @@ inline Register Register::XRegFromCode(unsigned code) {
inline Register Register::WRegFromCode(unsigned code) {
if (code == kSPRegInternalCode) {
- return wcsp;
+ return wsp;
} else {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
return Register::Create(code, kWRegSizeInBits);
@@ -198,9 +197,7 @@ inline VRegister CPURegister::Q() const {
template<typename T>
struct ImmediateInitializer {
static const bool kIsIntType = true;
- static inline RelocInfo::Mode rmode_for(T) {
- return sizeof(T) == 8 ? RelocInfo::NONE64 : RelocInfo::NONE32;
- }
+ static inline RelocInfo::Mode rmode_for(T) { return RelocInfo::NONE; }
static inline int64_t immediate_for(T t) {
STATIC_ASSERT(sizeof(T) <= 8);
return t;
@@ -211,9 +208,7 @@ struct ImmediateInitializer {
template<>
struct ImmediateInitializer<Smi*> {
static const bool kIsIntType = false;
- static inline RelocInfo::Mode rmode_for(Smi* t) {
- return RelocInfo::NONE64;
- }
+ static inline RelocInfo::Mode rmode_for(Smi* t) { return RelocInfo::NONE; }
static inline int64_t immediate_for(Smi* t) {
return reinterpret_cast<int64_t>(t);
}
@@ -581,26 +576,23 @@ Address Assembler::return_address_from_call_start(Address pc) {
}
}
-
void Assembler::deserialization_set_special_target_at(
- Isolate* isolate, Address constant_pool_entry, Code* code, Address target) {
+ Address constant_pool_entry, Code* code, Address target) {
Memory::Address_at(constant_pool_entry) = target;
}
-
void Assembler::deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
+ Address pc, Address target, RelocInfo::Mode mode) {
Memory::Address_at(pc) = target;
}
-
-void Assembler::set_target_address_at(Isolate* isolate, Address pc,
- Address constant_pool, Address target,
+void Assembler::set_target_address_at(Address pc, Address constant_pool,
+ Address target,
ICacheFlushMode icache_flush_mode) {
Memory::Address_at(target_pointer_address_at(pc)) = target;
// Intuitively, we would think it is necessary to always flush the
// instruction cache after patching a target address in the code as follows:
- // Assembler::FlushICache(isolate(), pc, sizeof(target));
+ // Assembler::FlushICache(pc, sizeof(target));
// However, on ARM, an instruction is actually patched in the case of
// embedded constants of the form:
// ldr ip, [pc, #...]
@@ -647,7 +639,7 @@ void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(target->GetIsolate(), pc_, constant_pool_,
+ Assembler::set_target_address_at(pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
@@ -681,28 +673,28 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
return target_address();
}
-void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
+void RelocInfo::set_target_runtime_entry(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target) {
- set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
}
}
-void RelocInfo::WipeOut(Isolate* isolate) {
+void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_));
if (IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = nullptr;
} else {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr);
+ Assembler::set_target_address_at(pc_, constant_pool_, nullptr);
}
}
template <typename ObjectVisitor>
-void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index a031884e1f..52c2e4643f 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -181,22 +181,20 @@ uint32_t RelocInfo::embedded_size() const {
return Memory::uint32_at(Assembler::target_pointer_address_at(pc_));
}
-void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
+void RelocInfo::set_embedded_address(Address address,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
- flush_mode);
+ Assembler::set_target_address_at(pc_, constant_pool_, address, flush_mode);
}
-void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
- ICacheFlushMode flush_mode) {
+void RelocInfo::set_embedded_size(uint32_t size, ICacheFlushMode flush_mode) {
Memory::uint32_at(Assembler::target_pointer_address_at(pc_)) = size;
// No icache flushing needed, see comment in set_target_address_at.
}
-void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
+void RelocInfo::set_js_to_wasm_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- set_embedded_address(isolate, address, icache_flush_mode);
+ set_embedded_address(address, icache_flush_mode);
}
Address RelocInfo::js_to_wasm_address() const {
@@ -467,9 +465,6 @@ void ConstPool::Clear() {
bool ConstPool::CanBeShared(RelocInfo::Mode mode) {
- // Constant pool currently does not support 32-bit entries.
- DCHECK(mode != RelocInfo::NONE32);
-
return RelocInfo::IsNone(mode) ||
(mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE);
}
@@ -2994,6 +2989,8 @@ void Assembler::isb() {
Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
}
+void Assembler::csdb() { hint(CSDB); }
+
void Assembler::fmov(const VRegister& vd, double imm) {
if (vd.IsScalar()) {
DCHECK(vd.Is1D());
@@ -4745,6 +4742,9 @@ void Assembler::GrowBuffer() {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ // Non-relocatable constants should not end up in the literal pool.
+ DCHECK(!RelocInfo::IsNone(rmode));
+
// We do not try to reuse pool constants.
RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, nullptr);
bool write_reloc_info = true;
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index 2deae8aaa4..c956c072b7 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -68,7 +68,6 @@ namespace internal {
// clang-format on
constexpr int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;
-static const int kNoCodeAgeSequenceLength = 5 * kInstructionSize;
const int kNumRegs = kNumberOfRegisters;
// Registers x0-x17 are caller-saved.
@@ -455,8 +454,8 @@ constexpr Register no_reg = NoReg;
GENERAL_REGISTER_CODE_LIST(DEFINE_REGISTERS)
#undef DEFINE_REGISTERS
-DEFINE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits);
-DEFINE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits);
+DEFINE_REGISTER(Register, wsp, kSPRegInternalCode, kWRegSizeInBits);
+DEFINE_REGISTER(Register, sp, kSPRegInternalCode, kXRegSizeInBits);
#define DEFINE_VREGISTERS(N) \
DEFINE_REGISTER(VRegister, b##N, N, kBRegSizeInBits); \
@@ -994,7 +993,7 @@ class Assembler : public AssemblerBase {
// The isolate argument is unused (and may be nullptr) when skipping flushing.
inline static Address target_address_at(Address pc, Address constant_pool);
inline static void set_target_address_at(
- Isolate* isolate, Address pc, Address constant_pool, Address target,
+ Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address of
@@ -1008,12 +1007,11 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Isolate* isolate, Address constant_pool_entry, Code* code,
- Address target);
+ Address constant_pool_entry, Code* code, Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target,
+ Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// All addresses in the constant pool are the same size as pointers.
@@ -1754,6 +1752,9 @@ class Assembler : public AssemblerBase {
// Instruction synchronization barrier
void isb();
+ // Conditional speculation barrier.
+ void csdb();
+
// Alias for system instructions.
void nop() { hint(NOP); }
@@ -3677,18 +3678,9 @@ class PatchingAssembler : public Assembler {
// If more or fewer instructions than expected are generated or if some
// relocation information takes space in the buffer, the PatchingAssembler
// will crash trying to grow the buffer.
-
- // This version will flush at destruction.
- PatchingAssembler(Isolate* isolate, byte* start, unsigned count)
- : PatchingAssembler(IsolateData(isolate), start, count) {
- CHECK_NOT_NULL(isolate);
- isolate_ = isolate;
- }
-
- // This version will not flush.
+ // Note that the instruction cache will not be flushed.
PatchingAssembler(IsolateData isolate_data, byte* start, unsigned count)
- : Assembler(isolate_data, start, count * kInstructionSize + kGap),
- isolate_(nullptr) {
+ : Assembler(isolate_data, start, count * kInstructionSize + kGap) {
// Block constant pool emission.
StartBlockPools();
}
@@ -3701,18 +3693,12 @@ class PatchingAssembler : public Assembler {
DCHECK((pc_offset() + kGap) == buffer_size_);
// Verify no relocation information has been emitted.
DCHECK(IsConstPoolEmpty());
- // Flush the Instruction cache.
- size_t length = buffer_size_ - kGap;
- if (isolate_ != nullptr) Assembler::FlushICache(isolate_, buffer_, length);
}
// See definition of PatchAdrFar() for details.
static constexpr int kAdrFarPatchableNNops = 2;
static constexpr int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2;
void PatchAdrFar(int64_t target_offset);
-
- private:
- Isolate* isolate_;
};
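Since the flushing PatchingAssembler constructor is gone, a call site that patches code which may already be executing is expected to flush the instruction cache itself. A hedged sketch of such a site, assuming 'pc' and 'isolate_data' exist in the caller and using the FlushICache(start, size) form quoted elsewhere in this patch:

  PatchingAssembler patcher(isolate_data, pc, 1);  // room for one instruction
  patcher.brk(0);                                  // overwrite it
  Assembler::FlushICache(pc, kInstructionSize);    // the caller flushes explicitly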
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index 52f92b6af9..07d020880d 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -30,7 +30,7 @@ namespace internal {
void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ Mov(x5, Operand(x0, LSL, kPointerSizeLog2));
- __ Str(x1, MemOperand(__ StackPointer(), x5));
+ __ Poke(x1, Operand(x5));
__ Push(x1, x2);
__ Add(x0, x0, Operand(3));
__ TailCallRuntime(Runtime::kNewArray);
@@ -314,7 +314,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ EnterExitFrame(
save_doubles(), x10, extra_stack_space,
is_builtin_exit() ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
- DCHECK(csp.Is(__ StackPointer()));
// Poke callee-saved registers into reserved space.
__ Poke(argv, 1 * kPointerSize);
@@ -349,12 +348,12 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// fp -> fp[0]: CallerFP (old fp)
// fp[-8]: Space reserved for SPOffset.
// fp[-16]: CodeObject()
- // csp[...]: Saved doubles, if saved_doubles is true.
- // csp[32]: Alignment padding, if necessary.
- // csp[24]: Preserved x23 (used for target).
- // csp[16]: Preserved x22 (used for argc).
- // csp[8]: Preserved x21 (used for argv).
- // csp -> csp[0]: Space reserved for the return address.
+ // sp[...]: Saved doubles, if saved_doubles is true.
+ // sp[32]: Alignment padding, if necessary.
+ // sp[24]: Preserved x23 (used for target).
+ // sp[16]: Preserved x22 (used for argc).
+ // sp[8]: Preserved x21 (used for argv).
+ // sp -> sp[0]: Space reserved for the return address.
//
// After a successful call, the exit frame, preserved registers (x21-x23) and
// the arguments (including the receiver) are dropped or popped as
@@ -364,8 +363,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// untouched, and the stub either throws an exception by jumping to one of
// the exception_returned label.
- DCHECK(csp.Is(__ StackPointer()));
-
// Prepare AAPCS64 arguments to pass to the builtin.
__ Mov(x0, argc);
__ Mov(x1, argv);
@@ -437,7 +434,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// contain the current pending exception, don't clobber it.
ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
isolate());
- DCHECK(csp.Is(masm->StackPointer()));
{
FrameScope scope(masm, StackFrame::MANUAL);
__ Mov(x0, 0); // argc.
@@ -454,7 +450,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
Register scratch = temps.AcquireX();
__ Mov(scratch, Operand(pending_handler_sp_address));
__ Ldr(scratch, MemOperand(scratch));
- __ Mov(csp, scratch);
+ __ Mov(sp, scratch);
}
__ Mov(fp, Operand(pending_handler_fp_address));
__ Ldr(fp, MemOperand(fp));
@@ -466,6 +462,12 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Bind(&not_js_frame);
+ // Reset the masking register. This is done independent of the underlying
+ // feature flag {FLAG_branch_load_poisoning} to make the snapshot work with
+ // both configurations. It is safe to always do this, because the underlying
+ // register is caller-saved and can be arbitrarily clobbered.
+ __ ResetSpeculationPoisonRegister();
+
// Compute the handler entry address and jump to it.
__ Mov(x10, Operand(pending_handler_entrypoint_address));
__ Ldr(x10, MemOperand(x10));
@@ -511,7 +513,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Push(x13, x12, xzr, x10);
// Set up fp.
- __ Sub(fp, __ StackPointer(), EntryFrameConstants::kCallerFPOffset);
+ __ Sub(fp, sp, EntryFrameConstants::kCallerFPOffset);
// Push the JS entry frame marker. Also set js_entry_sp if this is the
// outermost JS call.
@@ -582,7 +584,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.AcquireX();
- __ Mov(scratch, __ StackPointer());
+ __ Mov(scratch, sp);
__ Str(scratch, MemOperand(x11));
}
@@ -740,10 +742,6 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
- // Make sure the caller configured the stack pointer (see comment in
- // DirectCEntryStub::Generate).
- DCHECK(csp.Is(__ StackPointer()));
-
intptr_t code =
reinterpret_cast<intptr_t>(GetCode().location());
__ Mov(lr, Operand(code, RelocInfo::CODE_TARGET));
@@ -1260,7 +1258,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// Prepare arguments.
Register args = x6;
- __ Mov(args, masm->StackPointer());
+ __ Mov(args, sp);
// Allocate the v8::Arguments structure in the arguments' space, since it's
// not controlled by GC.
@@ -1344,7 +1342,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
"slots must be a multiple of 2 for stack pointer alignment");
// Load address of v8::PropertyAccessorInfo::args_ array and name handle.
- __ Mov(x0, masm->StackPointer()); // x0 = Handle<Name>
+ __ Mov(x0, sp); // x0 = Handle<Name>
__ Add(x1, x0, 1 * kPointerSize); // x1 = v8::PCI::args_
const int kApiStackSpace = 1;
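The comment added to CEntryStub above resets the masking register whenever control unwinds to a handler. Conceptually, branch load poisoning keeps kSpeculationPoisonRegister at all-ones on correctly predicted paths and folds it into protected loads, so clearing it back to all-ones here is always safe. A sketch of the intended data flow (registers and offsets are illustrative, not the exact generated sequence):

  __ ResetSpeculationPoisonRegister();                 // poison := all ones
  // A protected load then masks its result:
  __ Ldr(value, FieldMemOperand(object, offset));
  __ And(value, value, kSpeculationPoisonRegister);    // no-op while poison == ~0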
diff --git a/deps/v8/src/arm64/constants-arm64.h b/deps/v8/src/arm64/constants-arm64.h
index b02dd5d2d7..406b139a50 100644
--- a/deps/v8/src/arm64/constants-arm64.h
+++ b/deps/v8/src/arm64/constants-arm64.h
@@ -407,12 +407,13 @@ enum Extend {
};
enum SystemHint {
- NOP = 0,
+ NOP = 0,
YIELD = 1,
- WFE = 2,
- WFI = 3,
- SEV = 4,
- SEVL = 5
+ WFE = 2,
+ WFI = 3,
+ SEV = 4,
+ SEVL = 5,
+ CSDB = 20
};
enum BarrierDomain {
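CSDB (hint #20 above) is the consumption-of-speculative-data barrier Arm recommends for bounds-check hardening: a conditional select clamps the index, and csdb keeps later instructions from consuming the unclamped value under speculation. A sketch using the Csdb() and Csel() macro instructions added elsewhere in this patch (labels and registers are illustrative):

  __ Cmp(index, limit);
  __ B(hs, &out_of_bounds);        // architectural bounds check
  __ Csel(index, xzr, index, hs);  // on the mis-speculated path, force index to 0
  __ Csdb();                       // index is not used speculatively past this point
  __ Ldrb(value, MemOperand(base, index));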
diff --git a/deps/v8/src/arm64/decoder-arm64-inl.h b/deps/v8/src/arm64/decoder-arm64-inl.h
index 55a09dc1c5..201dfaa423 100644
--- a/deps/v8/src/arm64/decoder-arm64-inl.h
+++ b/deps/v8/src/arm64/decoder-arm64-inl.h
@@ -168,11 +168,6 @@ void Decoder<V>::DecodeBranchSystemException(Instruction* instr) {
(instr->Mask(0x0039E000) == 0x00002000) ||
(instr->Mask(0x003AE000) == 0x00002000) ||
(instr->Mask(0x003CE000) == 0x00042000) ||
- (instr->Mask(0x003FFFC0) == 0x000320C0) ||
- (instr->Mask(0x003FF100) == 0x00032100) ||
- (instr->Mask(0x003FF200) == 0x00032200) ||
- (instr->Mask(0x003FF400) == 0x00032400) ||
- (instr->Mask(0x003FF800) == 0x00032800) ||
(instr->Mask(0x0038F000) == 0x00005000) ||
(instr->Mask(0x0038E000) == 0x00006000)) {
V::VisitUnallocated(instr);
@@ -467,6 +462,7 @@ void Decoder<V>::DecodeDataProcessing(Instruction* instr) {
}
break;
}
+ V8_FALLTHROUGH;
}
case 1:
case 3:
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index 8269e8e50a..a81621b6a9 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -33,7 +33,7 @@ void CopyRegListToFrame(MacroAssembler* masm, const Register& dst,
// up a temp with an offset for accesses out of the range of the addressing
// mode.
Register src = temps.AcquireX();
- masm->Add(src, masm->StackPointer(), src_offset);
+ masm->Add(src, sp, src_offset);
masm->Add(dst, dst, dst_offset);
// Write reg_list into the frame pointed to by dst.
@@ -140,8 +140,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Mov(code_object, lr);
// Compute the fp-to-sp delta, adding two words for alignment padding and
// bailout id.
- __ Add(fp_to_sp, __ StackPointer(),
- kSavedRegistersAreaSize + (2 * kPointerSize));
+ __ Add(fp_to_sp, sp, kSavedRegistersAreaSize + (2 * kPointerSize));
__ Sub(fp_to_sp, fp, fp_to_sp);
// Allocate a new deoptimizer object.
@@ -222,7 +221,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
UseScratchRegisterScope temps(masm());
Register scratch = temps.AcquireX();
__ Ldr(scratch, MemOperand(x4, Deoptimizer::caller_frame_top_offset()));
- __ Mov(__ StackPointer(), scratch);
+ __ Mov(sp, scratch);
}
// Replace the current (input) frame with the output frames.
diff --git a/deps/v8/src/arm64/disasm-arm64.cc b/deps/v8/src/arm64/disasm-arm64.cc
index 41c654b214..d344903d59 100644
--- a/deps/v8/src/arm64/disasm-arm64.cc
+++ b/deps/v8/src/arm64/disasm-arm64.cc
@@ -968,7 +968,7 @@ void DisassemblingDecoder::VisitFPCompare(Instruction* instr) {
switch (instr->Mask(FPCompareMask)) {
case FCMP_s_zero:
- case FCMP_d_zero: form = form_zero; // Fall through.
+ case FCMP_d_zero: form = form_zero; V8_FALLTHROUGH;
case FCMP_s:
case FCMP_d: mnemonic = "fcmp"; break;
default: form = "(FPCompare)";
@@ -1246,6 +1246,11 @@ void DisassemblingDecoder::VisitSystem(Instruction* instr) {
form = nullptr;
break;
}
+ case CSDB: {
+ mnemonic = "csdb";
+ form = nullptr;
+ break;
+ }
}
} else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
switch (instr->Mask(MemBarrierMask)) {
@@ -3327,7 +3332,7 @@ void DisassemblingDecoder::AppendRegisterNameToOutput(const CPURegister& reg) {
}
}
- if (reg.IsVRegister() || !(reg.Aliases(csp) || reg.Aliases(xzr))) {
+ if (reg.IsVRegister() || !(reg.Aliases(sp) || reg.Aliases(xzr))) {
// Filter special registers
if (reg.IsX() && (reg.code() == 27)) {
AppendToOutput("cp");
@@ -3339,9 +3344,9 @@ void DisassemblingDecoder::AppendRegisterNameToOutput(const CPURegister& reg) {
// A core or scalar/vector register: [wx]0 - 30, [bhsdq]0 - 31.
AppendToOutput("%c%d", reg_char, reg.code());
}
- } else if (reg.Aliases(csp)) {
- // Disassemble w31/x31 as stack pointer wcsp/csp.
- AppendToOutput("%s", reg.Is64Bits() ? "csp" : "wcsp");
+ } else if (reg.Aliases(sp)) {
+ // Disassemble w31/x31 as stack pointer wsp/sp.
+ AppendToOutput("%s", reg.Is64Bits() ? "sp" : "wsp");
} else {
// Disassemble w31/x31 as zero register wzr/xzr.
AppendToOutput("%czr", reg_char);
@@ -3713,6 +3718,8 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
}
return 0;
}
+ UNIMPLEMENTED();
+ return 0;
}
case 'L': { // IVLSLane[0123] - suffix indicates access size shift.
AppendToOutput("%d", instr->NEONLSIndex(format[8] - '0'));
@@ -3836,7 +3843,8 @@ int DisassemblingDecoder::SubstituteShiftField(Instruction* instr,
switch (format[1]) {
case 'D': { // NDP.
DCHECK(instr->ShiftDP() != ROR);
- } // Fall through.
+ V8_FALLTHROUGH;
+ }
case 'L': { // NLo.
if (instr->ImmDPShift() != 0) {
const char* shift_type[] = {"lsl", "lsr", "asr", "ror"};
diff --git a/deps/v8/src/arm64/disasm-arm64.h b/deps/v8/src/arm64/disasm-arm64.h
index c12d53b7e6..0edb2ea583 100644
--- a/deps/v8/src/arm64/disasm-arm64.h
+++ b/deps/v8/src/arm64/disasm-arm64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_DISASM_ARM64_H
-#define V8_ARM64_DISASM_ARM64_H
+#ifndef V8_ARM64_DISASM_ARM64_H_
+#define V8_ARM64_DISASM_ARM64_H_
#include "src/arm64/assembler-arm64.h"
#include "src/arm64/decoder-arm64.h"
@@ -96,4 +96,4 @@ class PrintDisassembler : public DisassemblingDecoder {
} // namespace internal
} // namespace v8
-#endif // V8_ARM64_DISASM_ARM64_H
+#endif // V8_ARM64_DISASM_ARM64_H_
diff --git a/deps/v8/src/arm64/eh-frame-arm64.cc b/deps/v8/src/arm64/eh-frame-arm64.cc
index 48909d5b2d..79d8510f9b 100644
--- a/deps/v8/src/arm64/eh-frame-arm64.cc
+++ b/deps/v8/src/arm64/eh-frame-arm64.cc
@@ -11,7 +11,7 @@ namespace internal {
static const int kX0DwarfCode = 0;
static const int kFpDwarfCode = 29;
static const int kLrDwarfCode = 30;
-static const int kCSpDwarfCode = 31;
+static const int kSpDwarfCode = 31;
const int EhFrameConstants::kCodeAlignmentFactor = 4;
const int EhFrameConstants::kDataAlignmentFactor = -8;
@@ -33,7 +33,7 @@ int EhFrameWriter::RegisterToDwarfCode(Register name) {
case kRegCode_x30:
return kLrDwarfCode;
case kSPRegInternalCode:
- return kCSpDwarfCode;
+ return kSpDwarfCode;
case kRegCode_x0:
return kX0DwarfCode;
default:
@@ -51,8 +51,8 @@ const char* EhFrameDisassembler::DwarfRegisterCodeToString(int code) {
return "fp";
case kLrDwarfCode:
return "lr";
- case kCSpDwarfCode:
- return "csp"; // This could be zr as well
+ case kSpDwarfCode:
+ return "sp"; // This could be zr as well
default:
UNIMPLEMENTED();
return nullptr;
diff --git a/deps/v8/src/arm64/frame-constants-arm64.h b/deps/v8/src/arm64/frame-constants-arm64.h
index a337079786..00ac99d1be 100644
--- a/deps/v8/src/arm64/frame-constants-arm64.h
+++ b/deps/v8/src/arm64/frame-constants-arm64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_FRAMES_ARM64_H_
-#define V8_ARM64_FRAMES_ARM64_H_
+#ifndef V8_ARM64_FRAME_CONSTANTS_ARM64_H_
+#define V8_ARM64_FRAME_CONSTANTS_ARM64_H_
namespace v8 {
namespace internal {
@@ -61,4 +61,4 @@ class JavaScriptFrameConstants : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_ARM64_FRAMES_ARM64_H_
+#endif // V8_ARM64_FRAME_CONSTANTS_ARM64_H_
diff --git a/deps/v8/src/arm64/instructions-arm64.h b/deps/v8/src/arm64/instructions-arm64.h
index 0cc3e803d0..499023ebb2 100644
--- a/deps/v8/src/arm64/instructions-arm64.h
+++ b/deps/v8/src/arm64/instructions-arm64.h
@@ -258,7 +258,7 @@ class Instruction {
// Indicate whether Rd can be the stack pointer or the zero register. This
// does not check that the instruction actually has an Rd field.
Reg31Mode RdMode() const {
- // The following instructions use csp or wsp as Rd:
+ // The following instructions use sp or wsp as Rd:
// Add/sub (immediate) when not setting the flags.
// Add/sub (extended) when not setting the flags.
// Logical (immediate) when not setting the flags.
@@ -272,7 +272,7 @@ class Instruction {
}
if (IsLogicalImmediate()) {
// Of the logical (immediate) instructions, only ANDS (and its aliases)
- // can set the flags. The others can all write into csp.
+ // can set the flags. The others can all write into sp.
// Note that some logical operations are not available to
// immediate-operand instructions, so we have to combine two masks here.
if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
@@ -287,7 +287,7 @@ class Instruction {
// Indicate whether Rn can be the stack pointer or the zero register. This
// does not check that the instruction actually has an Rn field.
Reg31Mode RnMode() const {
- // The following instructions use csp or wsp as Rn:
+ // The following instructions use sp or wsp as Rn:
// All loads and stores.
// Add/sub (immediate).
// Add/sub (extended).
diff --git a/deps/v8/src/arm64/instrument-arm64.cc b/deps/v8/src/arm64/instrument-arm64.cc
index 8e9cce7197..f9550782c1 100644
--- a/deps/v8/src/arm64/instrument-arm64.cc
+++ b/deps/v8/src/arm64/instrument-arm64.cc
@@ -91,7 +91,6 @@ static const CounterDescriptor kCounterList[] = {
{"PC Addressing", Gauge},
{"Other", Gauge},
- {"SP Adjust", Gauge},
};
Instrument::Instrument(const char* datafile, uint64_t sample_period)
@@ -238,16 +237,8 @@ void Instrument::VisitPCRelAddressing(Instruction* instr) {
void Instrument::VisitAddSubImmediate(Instruction* instr) {
Update();
- static Counter* sp_counter = GetCounter("SP Adjust");
- static Counter* add_sub_counter = GetCounter("Add/Sub DP");
- if (((instr->Mask(AddSubOpMask) == SUB) ||
- (instr->Mask(AddSubOpMask) == ADD)) &&
- (instr->Rd() == 31) && (instr->Rn() == 31)) {
- // Count adjustments to the C stack pointer caused by V8 needing two SPs.
- sp_counter->Increment();
- } else {
- add_sub_counter->Increment();
- }
+ static Counter* counter = GetCounter("Add/Sub DP");
+ counter->Increment();
}
@@ -470,16 +461,8 @@ void Instrument::VisitAddSubShifted(Instruction* instr) {
void Instrument::VisitAddSubExtended(Instruction* instr) {
Update();
- static Counter* sp_counter = GetCounter("SP Adjust");
- static Counter* add_sub_counter = GetCounter("Add/Sub DP");
- if (((instr->Mask(AddSubOpMask) == SUB) ||
- (instr->Mask(AddSubOpMask) == ADD)) &&
- (instr->Rd() == 31) && (instr->Rn() == 31)) {
- // Count adjustments to the C stack pointer caused by V8 needing two SPs.
- sp_counter->Increment();
- } else {
- add_sub_counter->Increment();
- }
+ static Counter* counter = GetCounter("Add/Sub DP");
+ counter->Increment();
}
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index 17b058bd01..bcbe5d97dc 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -69,15 +69,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
-void FastNewClosureDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x1: function info
- // x2: feedback vector
- // x3: slot
- Register registers[] = {x1, x2, x3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return x0; }
diff --git a/deps/v8/src/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
index 0861551d89..f96d4b20b8 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
@@ -404,8 +404,7 @@ void MacroAssembler::CzeroX(const Register& rd,
// Conditionally move a value into the destination register. Only X registers
// are supported due to the truncation side-effect when used on W registers.
-void MacroAssembler::CmovX(const Register& rd,
- const Register& rn,
+void TurboAssembler::CmovX(const Register& rd, const Register& rn,
Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsSP());
@@ -416,6 +415,11 @@ void MacroAssembler::CmovX(const Register& rd,
}
}
+void TurboAssembler::Csdb() {
+ DCHECK(allow_macro_instructions());
+ csdb();
+}
+
void TurboAssembler::Cset(const Register& rd, Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@@ -423,8 +427,7 @@ void TurboAssembler::Cset(const Register& rd, Condition cond) {
cset(rd, cond);
}
-
-void MacroAssembler::Csetm(const Register& rd, Condition cond) {
+void TurboAssembler::Csetm(const Register& rd, Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
DCHECK((cond != al) && (cond != nv));
@@ -461,14 +464,12 @@ void MacroAssembler::Csneg(const Register& rd,
csneg(rd, rn, rm, cond);
}
-
-void MacroAssembler::Dmb(BarrierDomain domain, BarrierType type) {
+void TurboAssembler::Dmb(BarrierDomain domain, BarrierType type) {
DCHECK(allow_macro_instructions());
dmb(domain, type);
}
-
-void MacroAssembler::Dsb(BarrierDomain domain, BarrierType type) {
+void TurboAssembler::Dsb(BarrierDomain domain, BarrierType type) {
DCHECK(allow_macro_instructions());
dsb(domain, type);
}
@@ -651,10 +652,12 @@ void TurboAssembler::Fmov(VRegister vd, double imm) {
if (bits == 0) {
fmov(vd, xzr);
} else {
- Ldr(vd, imm);
+ UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ Mov(tmp, bits);
+ fmov(vd, tmp);
}
} else {
- // TODO(all): consider NEON support for load literal.
Movi(vd, bits);
}
}
@@ -678,12 +681,10 @@ void TurboAssembler::Fmov(VRegister vd, float imm) {
} else {
UseScratchRegisterScope temps(this);
Register tmp = temps.AcquireW();
- // TODO(all): Use Assembler::ldr(const VRegister& ft, float imm).
Mov(tmp, bit_cast<uint32_t>(imm));
Fmov(vd, tmp);
}
} else {
- // TODO(all): consider NEON support for load literal.
Movi(vd, bits);
}
}
@@ -737,8 +738,7 @@ void MacroAssembler::Hlt(int code) {
hlt(code);
}
-
-void MacroAssembler::Isb() {
+void TurboAssembler::Isb() {
DCHECK(allow_macro_instructions());
isb();
}
@@ -748,12 +748,6 @@ void TurboAssembler::Ldr(const CPURegister& rt, const Operand& operand) {
ldr(rt, operand);
}
-void TurboAssembler::Ldr(const CPURegister& rt, double imm) {
- DCHECK(allow_macro_instructions());
- DCHECK(rt.Is64Bits());
- ldr(rt, Immediate(bit_cast<uint64_t>(imm)));
-}
-
void TurboAssembler::Lsl(const Register& rd, const Register& rn,
unsigned shift) {
DCHECK(allow_macro_instructions());
@@ -1042,58 +1036,6 @@ void TurboAssembler::Uxtw(const Register& rd, const Register& rn) {
uxtw(rd, rn);
}
-void MacroAssembler::AlignAndSetCSPForFrame() {
- int sp_alignment = ActivationFrameAlignment();
- // AAPCS64 mandates at least 16-byte alignment.
- DCHECK_GE(sp_alignment, 16);
- DCHECK(base::bits::IsPowerOfTwo(sp_alignment));
- Bic(csp, StackPointer(), sp_alignment - 1);
-}
-
-void TurboAssembler::BumpSystemStackPointer(const Operand& space) {
- DCHECK(!csp.Is(StackPointer()));
- if (!TmpList()->IsEmpty()) {
- Sub(csp, StackPointer(), space);
- } else {
- // TODO(jbramley): Several callers rely on this not using scratch
- // registers, so we use the assembler directly here. However, this means
- // that large immediate values of 'space' cannot be handled cleanly. (Only
- // 24-bits immediates or values of 'space' that can be encoded in one
- // instruction are accepted.) Once we implement our flexible scratch
- // register idea, we could greatly simplify this function.
- InstructionAccurateScope scope(this);
- DCHECK(space.IsImmediate());
- // Align to 16 bytes.
- uint64_t imm = RoundUp(space.ImmediateValue(), 0x10);
- DCHECK(is_uint24(imm));
-
- Register source = StackPointer();
- if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
- bic(csp, source, 0xf);
- source = csp;
- }
- if (!is_uint12(imm)) {
- int64_t imm_top_12_bits = imm >> 12;
- sub(csp, source, imm_top_12_bits << 12);
- source = csp;
- imm -= imm_top_12_bits << 12;
- }
- if (imm > 0) {
- sub(csp, source, imm);
- }
- }
- AssertStackConsistency();
-}
-
-void TurboAssembler::SyncSystemStackPointer() {
- DCHECK(emit_debug_code());
- DCHECK(!csp.Is(StackPointer()));
- { InstructionAccurateScope scope(this);
- mov(csp, StackPointer());
- }
- AssertStackConsistency();
-}
-
void TurboAssembler::InitializeRootRegister() {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
@@ -1249,14 +1191,9 @@ void TurboAssembler::Claim(int64_t count, uint64_t unit_size) {
if (size == 0) {
return;
}
+ DCHECK_EQ(size % 16, 0);
- if (csp.Is(StackPointer())) {
- DCHECK_EQ(size % 16, 0);
- } else {
- BumpSystemStackPointer(size);
- }
-
- Sub(StackPointer(), StackPointer(), size);
+ Sub(sp, sp, size);
}
void TurboAssembler::Claim(const Register& count, uint64_t unit_size) {
@@ -1269,13 +1206,9 @@ void TurboAssembler::Claim(const Register& count, uint64_t unit_size) {
if (size.IsZero()) {
return;
}
-
AssertPositiveOrZero(count);
- if (!csp.Is(StackPointer())) {
- BumpSystemStackPointer(size);
- }
- Sub(StackPointer(), StackPointer(), size);
+ Sub(sp, sp, size);
}
@@ -1290,11 +1223,7 @@ void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
return;
}
- if (!csp.Is(StackPointer())) {
- BumpSystemStackPointer(size);
- }
-
- Sub(StackPointer(), StackPointer(), size);
+ Sub(sp, sp, size);
}
void TurboAssembler::Drop(int64_t count, uint64_t unit_size) {
@@ -1305,16 +1234,8 @@ void TurboAssembler::Drop(int64_t count, uint64_t unit_size) {
return;
}
- Add(StackPointer(), StackPointer(), size);
-
- if (csp.Is(StackPointer())) {
- DCHECK_EQ(size % 16, 0);
- } else if (emit_debug_code()) {
- // It is safe to leave csp where it is when unwinding the JavaScript stack,
- // but if we keep it matching StackPointer, the simulator can detect memory
- // accesses in the now-free part of the stack.
- SyncSystemStackPointer();
- }
+ Add(sp, sp, size);
+ DCHECK_EQ(size % 16, 0);
}
void TurboAssembler::Drop(const Register& count, uint64_t unit_size) {
@@ -1329,14 +1250,7 @@ void TurboAssembler::Drop(const Register& count, uint64_t unit_size) {
}
AssertPositiveOrZero(count);
- Add(StackPointer(), StackPointer(), size);
-
- if (!csp.Is(StackPointer()) && emit_debug_code()) {
- // It is safe to leave csp where it is when unwinding the JavaScript stack,
- // but if we keep it matching StackPointer, the simulator can detect memory
- // accesses in the now-free part of the stack.
- SyncSystemStackPointer();
- }
+ Add(sp, sp, size);
}
void TurboAssembler::DropArguments(const Register& count,
@@ -1378,14 +1292,7 @@ void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
return;
}
- Add(StackPointer(), StackPointer(), size);
-
- if (!csp.Is(StackPointer()) && emit_debug_code()) {
- // It is safe to leave csp where it is when unwinding the JavaScript stack,
- // but if we keep it matching StackPointer, the simulator can detect memory
- // accesses in the now-free part of the stack.
- SyncSystemStackPointer();
- }
+ Add(sp, sp, size);
}
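With the jssp/csp split gone, Claim and Drop in this file adjust the real sp, so every adjustment has to respect the AAPCS64 rule that sp stays 16-byte aligned whenever it is used as the base of a memory access; that is what the DCHECK_EQ(size % 16, 0) above enforces. A small usage sketch, assuming the default unit size of one X register (8 bytes):

  __ Claim(2);                 // 2 * 8 = 16 bytes, alignment preserved
  __ Poke(x0, 0);              // sp[0] = x0
  __ Poke(x1, kPointerSize);   // sp[8] = x1
  __ Peek(x2, 0);              // x2 = sp[0]
  __ Drop(2);                  // release the same 16 bytes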
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 3869046f74..267bc2151b 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -15,6 +15,7 @@
#include "src/frame-constants.h"
#include "src/frames-inl.h"
#include "src/heap/heap-inl.h"
+#include "src/instruction-stream.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
@@ -188,15 +189,14 @@ void TurboAssembler::LogicalMacro(const Register& rd, const Register& rn,
// If the left-hand input is the stack pointer, we can't pre-shift the
// immediate, as the encoding won't allow the subsequent post shift.
- PreShiftImmMode mode = rn.Is(csp) ? kNoShift : kAnyShift;
+ PreShiftImmMode mode = rn.Is(sp) ? kNoShift : kAnyShift;
Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate, mode);
- if (rd.Is(csp)) {
+ if (rd.IsSP()) {
// If rd is the stack pointer we cannot use it as the destination
// register so we use the temp register as an intermediate again.
Logical(temp, rn, imm_operand, op);
- Mov(csp, temp);
- AssertStackConsistency();
+ Mov(sp, temp);
} else {
Logical(rd, rn, imm_operand, op);
}
@@ -294,7 +294,6 @@ void TurboAssembler::Mov(const Register& rd, uint64_t imm) {
// pointer.
if (rd.IsSP()) {
mov(rd, temp);
- AssertStackConsistency();
}
}
}
@@ -337,7 +336,7 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
// registers is not required to clear the top word of the X register. In
// this case, the instruction is discarded.
//
- // If csp is an operand, add #0 is emitted, otherwise, orr #0.
+ // If sp is an operand, add #0 is emitted, otherwise, orr #0.
if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
(discard_mode == kDontDiscardForSameWReg))) {
Assembler::mov(rd, operand.reg());
@@ -596,11 +595,8 @@ void TurboAssembler::ConditionalCompareMacro(const Register& rn,
}
}
-
-void MacroAssembler::Csel(const Register& rd,
- const Register& rn,
- const Operand& operand,
- Condition cond) {
+void TurboAssembler::Csel(const Register& rd, const Register& rn,
+ const Operand& operand, Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
DCHECK((cond != al) && (cond != nv));
@@ -724,11 +720,11 @@ void TurboAssembler::AddSubMacro(const Register& rd, const Register& rn,
// If the destination or source register is the stack pointer, we can
// only pre-shift the immediate right by values supported in the add/sub
// extend encoding.
- if (rd.Is(csp)) {
+ if (rd.Is(sp)) {
// If the destination is SP and flags will be set, we can't pre-shift
// the immediate at all.
mode = (S == SetFlags) ? kNoShift : kLimitShiftForSP;
- } else if (rn.Is(csp)) {
+ } else if (rn.Is(sp)) {
mode = kLimitShiftForSP;
}
@@ -1105,9 +1101,9 @@ void TurboAssembler::Push(const Register& src0, const VRegister& src1) {
PushPreamble(size);
// Reserve room for src0 and push src1.
- str(src1, MemOperand(StackPointer(), -size, PreIndex));
+ str(src1, MemOperand(sp, -size, PreIndex));
// Fill the gap with src0.
- str(src0, MemOperand(StackPointer(), src1.SizeInBytes()));
+ str(src0, MemOperand(sp, src1.SizeInBytes()));
}
@@ -1166,9 +1162,7 @@ void TurboAssembler::PushCPURegList(CPURegList registers) {
int size = registers.RegisterSizeInBytes();
PushPreamble(registers.Count(), size);
- // Push up to four registers at a time because if the current stack pointer is
- // csp and reg_size is 32, registers must be pushed in blocks of four in order
- // to maintain the 16-byte alignment for csp.
+ // Push up to four registers at a time.
while (!registers.IsEmpty()) {
int count_before = registers.Count();
const CPURegister& src0 = registers.PopHighestIndex();
@@ -1183,9 +1177,7 @@ void TurboAssembler::PushCPURegList(CPURegList registers) {
void TurboAssembler::PopCPURegList(CPURegList registers) {
int size = registers.RegisterSizeInBytes();
- // Pop up to four registers at a time because if the current stack pointer is
- // csp and reg_size is 32, registers must be pushed in blocks of four in
- // order to maintain the 16-byte alignment for csp.
+ // Pop up to four registers at a time.
while (!registers.IsEmpty()) {
int count_before = registers.Count();
const CPURegister& dst0 = registers.PopLowestIndex();
@@ -1258,23 +1250,23 @@ void TurboAssembler::PushHelper(int count, int size, const CPURegister& src0,
switch (count) {
case 1:
DCHECK(src1.IsNone() && src2.IsNone() && src3.IsNone());
- str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
+ str(src0, MemOperand(sp, -1 * size, PreIndex));
break;
case 2:
DCHECK(src2.IsNone() && src3.IsNone());
- stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
+ stp(src1, src0, MemOperand(sp, -2 * size, PreIndex));
break;
case 3:
DCHECK(src3.IsNone());
- stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
- str(src0, MemOperand(StackPointer(), 2 * size));
+ stp(src2, src1, MemOperand(sp, -3 * size, PreIndex));
+ str(src0, MemOperand(sp, 2 * size));
break;
case 4:
// Skip over 4 * size, then fill in the gap. This allows four W registers
- // to be pushed using csp, whilst maintaining 16-byte alignment for csp
+ // to be pushed using sp, whilst maintaining 16-byte alignment for sp
// at all times.
- stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
- stp(src1, src0, MemOperand(StackPointer(), 2 * size));
+ stp(src3, src2, MemOperand(sp, -4 * size, PreIndex));
+ stp(src1, src0, MemOperand(sp, 2 * size));
break;
default:
UNREACHABLE();
@@ -1295,24 +1287,24 @@ void TurboAssembler::PopHelper(int count, int size, const CPURegister& dst0,
switch (count) {
case 1:
DCHECK(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
- ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
+ ldr(dst0, MemOperand(sp, 1 * size, PostIndex));
break;
case 2:
DCHECK(dst2.IsNone() && dst3.IsNone());
- ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
+ ldp(dst0, dst1, MemOperand(sp, 2 * size, PostIndex));
break;
case 3:
DCHECK(dst3.IsNone());
- ldr(dst2, MemOperand(StackPointer(), 2 * size));
- ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
+ ldr(dst2, MemOperand(sp, 2 * size));
+ ldp(dst0, dst1, MemOperand(sp, 3 * size, PostIndex));
break;
case 4:
// Load the higher addresses first, then load the lower addresses and
// skip the whole block in the second instruction. This allows four W
- // registers to be popped using csp, whilst maintaining 16-byte alignment
- // for csp at all times.
- ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
- ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
+ // registers to be popped using sp, whilst maintaining 16-byte alignment
+ // for sp at all times.
+ ldp(dst2, dst3, MemOperand(sp, 2 * size));
+ ldp(dst0, dst1, MemOperand(sp, 4 * size, PostIndex));
break;
default:
UNREACHABLE();
@@ -1322,43 +1314,27 @@ void TurboAssembler::PopHelper(int count, int size, const CPURegister& dst0,
void TurboAssembler::PushPreamble(Operand total_size) {
if (total_size.IsZero()) return;
- if (csp.Is(StackPointer())) {
- // If the current stack pointer is csp, then it must be aligned to 16 bytes
- // on entry and the total size of the specified registers must also be a
- // multiple of 16 bytes.
- if (total_size.IsImmediate()) {
- DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
- }
-
- // Don't check access size for non-immediate sizes. It's difficult to do
- // well, and it will be caught by hardware (or the simulator) anyway.
- } else {
- // Even if the current stack pointer is not the system stack pointer (csp),
- // the system stack pointer will still be modified in order to comply with
- // ABI rules about accessing memory below the system stack pointer.
- BumpSystemStackPointer(total_size);
+ // The stack pointer must be aligned to 16 bytes on entry, and the total
+ // size of the specified registers must also be a multiple of 16 bytes.
+ if (total_size.IsImmediate()) {
+ DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
}
+
+ // Don't check access size for non-immediate sizes. It's difficult to do
+ // well, and it will be caught by hardware (or the simulator) anyway.
}
void TurboAssembler::PopPostamble(Operand total_size) {
if (total_size.IsZero()) return;
- if (csp.Is(StackPointer())) {
- // If the current stack pointer is csp, then it must be aligned to 16 bytes
- // on entry and the total size of the specified registers must also be a
- // multiple of 16 bytes.
- if (total_size.IsImmediate()) {
- DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
- }
-
- // Don't check access size for non-immediate sizes. It's difficult to do
- // well, and it will be caught by hardware (or the simulator) anyway.
- } else if (emit_debug_code()) {
- // It is safe to leave csp where it is when unwinding the JavaScript stack,
- // but if we keep it matching StackPointer, the simulator can detect memory
- // accesses in the now-free part of the stack.
- SyncSystemStackPointer();
+ // The stack pointer must be aligned to 16 bytes on entry, and the total
+ // size of the specified registers must also be a multiple of 16 bytes.
+ if (total_size.IsImmediate()) {
+ DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
}
+
+ // Don't check access size for non-immediate sizes. It's difficult to do
+ // well, and it will be caught by hardware (or the simulator) anyway.
}
void TurboAssembler::PushPreamble(int count, int size) {
@@ -1376,7 +1352,7 @@ void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) {
Check(le, AbortReason::kStackAccessBelowStackPointer);
}
- Str(src, MemOperand(StackPointer(), offset));
+ Str(src, MemOperand(sp, offset));
}
@@ -1388,14 +1364,14 @@ void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
Check(le, AbortReason::kStackAccessBelowStackPointer);
}
- Ldr(dst, MemOperand(StackPointer(), offset));
+ Ldr(dst, MemOperand(sp, offset));
}
void TurboAssembler::PokePair(const CPURegister& src1, const CPURegister& src2,
int offset) {
DCHECK(AreSameSizeAndType(src1, src2));
DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
- Stp(src1, src2, MemOperand(StackPointer(), offset));
+ Stp(src1, src2, MemOperand(sp, offset));
}
@@ -1404,7 +1380,7 @@ void MacroAssembler::PeekPair(const CPURegister& dst1,
int offset) {
DCHECK(AreSameSizeAndType(dst1, dst2));
DCHECK((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
- Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
+ Ldp(dst1, dst2, MemOperand(sp, offset));
}
@@ -1412,11 +1388,7 @@ void MacroAssembler::PushCalleeSavedRegisters() {
// Ensure that the macro-assembler doesn't use any scratch registers.
InstructionAccurateScope scope(this);
- // This method must not be called unless the current stack pointer is the
- // system stack pointer (csp).
- DCHECK(csp.Is(StackPointer()));
-
- MemOperand tos(csp, -2 * static_cast<int>(kXRegSize), PreIndex);
+ MemOperand tos(sp, -2 * static_cast<int>(kXRegSize), PreIndex);
stp(d14, d15, tos);
stp(d12, d13, tos);
@@ -1436,11 +1408,7 @@ void MacroAssembler::PopCalleeSavedRegisters() {
// Ensure that the macro-assembler doesn't use any scratch registers.
InstructionAccurateScope scope(this);
- // This method must not be called unless the current stack pointer is the
- // system stack pointer (csp).
- DCHECK(csp.Is(StackPointer()));
-
- MemOperand tos(csp, 2 * kXRegSize, PostIndex);
+ MemOperand tos(sp, 2 * kXRegSize, PostIndex);
ldp(x19, x20, tos);
ldp(x21, x22, tos);
@@ -1455,44 +1423,15 @@ void MacroAssembler::PopCalleeSavedRegisters() {
ldp(d14, d15, tos);
}
-void TurboAssembler::AssertStackConsistency() {
- // Avoid emitting code when !use_real_abort() since non-real aborts cause too
- // much code to be generated.
- if (emit_debug_code() && use_real_aborts()) {
- if (csp.Is(StackPointer())) {
- // Always check the alignment of csp if ALWAYS_ALIGN_CSP is true. We
- // can't check the alignment of csp without using a scratch register (or
- // clobbering the flags), but the processor (or simulator) will abort if
- // it is not properly aligned during a load.
- ldr(xzr, MemOperand(csp, 0));
- }
- if (FLAG_enable_slow_asserts && !csp.Is(StackPointer())) {
- Label ok;
- // Check that csp <= StackPointer(), preserving all registers and NZCV.
- sub(StackPointer(), csp, StackPointer());
- cbz(StackPointer(), &ok); // Ok if csp == StackPointer().
- tbnz(StackPointer(), kXSignBit, &ok); // Ok if csp < StackPointer().
-
- // Avoid generating AssertStackConsistency checks for the Push in Abort.
- { DontEmitDebugCodeScope dont_emit_debug_code_scope(this);
- // Restore StackPointer().
- sub(StackPointer(), csp, StackPointer());
- Abort(AbortReason::kTheCurrentStackPointerIsBelowCsp);
- }
-
- bind(&ok);
- // Restore StackPointer().
- sub(StackPointer(), csp, StackPointer());
- }
- }
-}
-
-void TurboAssembler::AssertCspAligned() {
+void TurboAssembler::AssertSpAligned() {
if (emit_debug_code() && use_real_aborts()) {
- // TODO(titzer): use a real assert for alignment check?
+ // Arm64 requires the stack pointer to be 16-byte aligned prior to address
+ // calculation.
UseScratchRegisterScope scope(this);
Register temp = scope.AcquireX();
- ldr(temp, MemOperand(csp));
+ Mov(temp, sp);
+ Tst(temp, 15);
+ Check(eq, AbortReason::kUnexpectedStackPointer);
}
}
@@ -1568,11 +1507,11 @@ void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count,
}
void TurboAssembler::SlotAddress(Register dst, int slot_offset) {
- Add(dst, StackPointer(), slot_offset << kPointerSizeLog2);
+ Add(dst, sp, slot_offset << kPointerSizeLog2);
}
void TurboAssembler::SlotAddress(Register dst, Register slot_offset) {
- Add(dst, StackPointer(), Operand(slot_offset, LSL, kPointerSizeLog2));
+ Add(dst, sp, Operand(slot_offset, LSL, kPointerSizeLog2));
}
void TurboAssembler::AssertFPCRState(Register fpcr) {
@@ -1630,6 +1569,34 @@ void TurboAssembler::Move(Register dst, Register src) { Mov(dst, src); }
void TurboAssembler::Move(Register dst, Handle<HeapObject> x) { Mov(dst, x); }
void TurboAssembler::Move(Register dst, Smi* src) { Mov(dst, src); }
+void TurboAssembler::Swap(Register lhs, Register rhs) {
+ DCHECK(lhs.IsSameSizeAndType(rhs));
+ DCHECK(!lhs.Is(rhs));
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Mov(temp, rhs);
+ Mov(rhs, lhs);
+ Mov(lhs, temp);
+}
+
+void TurboAssembler::Swap(VRegister lhs, VRegister rhs) {
+ DCHECK(lhs.IsSameSizeAndType(rhs));
+ DCHECK(!lhs.Is(rhs));
+ UseScratchRegisterScope temps(this);
+ VRegister temp = VRegister::no_reg();
+ if (lhs.IsS()) {
+ temp = temps.AcquireS();
+ } else if (lhs.IsD()) {
+ temp = temps.AcquireD();
+ } else {
+ DCHECK(lhs.IsQ());
+ temp = temps.AcquireQ();
+ }
+ Mov(temp, rhs);
+ Mov(rhs, lhs);
+ Mov(lhs, temp);
+}
+
void TurboAssembler::AssertSmi(Register object, AbortReason reason) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -1792,6 +1759,12 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
+void MacroAssembler::JumpToInstructionStream(const InstructionStream* stream) {
+ uint64_t bytes_address = reinterpret_cast<uint64_t>(stream->bytes());
+ Mov(kOffHeapTrampolineRegister, bytes_address);
+ Br(kOffHeapTrampolineRegister);
+}
+
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
DCHECK_EQ(1, function->result_size);
@@ -1927,13 +1900,10 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
Bind(&start_call);
#endif
- // Addresses always have 64 bits, so we shouldn't encounter NONE32.
- DCHECK(rmode != RelocInfo::NONE32);
-
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- if (rmode == RelocInfo::NONE64) {
+ if (RelocInfo::IsNone(rmode)) {
// Addresses are 48 bits so we never need to load the upper 16 bits.
uint64_t imm = reinterpret_cast<uint64_t>(target);
// If we don't use ARM tagged addresses, the 16 higher bits must be 0.
@@ -2009,62 +1979,15 @@ int TurboAssembler::CallSize(Label* target) {
int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
USE(target);
- // Addresses always have 64 bits, so we shouldn't encounter NONE32.
- DCHECK(rmode != RelocInfo::NONE32);
-
- if (rmode == RelocInfo::NONE64) {
- return kCallSizeWithoutRelocation;
- } else {
- return kCallSizeWithRelocation;
- }
+ return RelocInfo::IsNone(rmode) ? kCallSizeWithoutRelocation
+ : kCallSizeWithRelocation;
}
int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
USE(code);
- // Addresses always have 64 bits, so we shouldn't encounter NONE32.
- DCHECK(rmode != RelocInfo::NONE32);
-
- if (rmode == RelocInfo::NONE64) {
- return kCallSizeWithoutRelocation;
- } else {
- return kCallSizeWithRelocation;
- }
-}
-
-
-void MacroAssembler::JumpIfHeapNumber(Register object, Label* on_heap_number,
- SmiCheckType smi_check_type) {
- Label on_not_heap_number;
-
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(object, &on_not_heap_number);
- }
-
- AssertNotSmi(object);
-
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
- Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
- JumpIfRoot(temp, Heap::kHeapNumberMapRootIndex, on_heap_number);
-
- Bind(&on_not_heap_number);
-}
-
-
-void MacroAssembler::JumpIfNotHeapNumber(Register object,
- Label* on_not_heap_number,
- SmiCheckType smi_check_type) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(object, on_not_heap_number);
- }
-
- AssertNotSmi(object);
-
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
- Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
- JumpIfNotRoot(temp, Heap::kHeapNumberMapRootIndex, on_not_heap_number);
+ return RelocInfo::IsNone(rmode) ? kCallSizeWithoutRelocation
+ : kCallSizeWithRelocation;
}
void MacroAssembler::TryRepresentDoubleAsInt(Register as_int, VRegister value,
@@ -2110,12 +2033,10 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Register src_reg = caller_args_count_reg;
// Calculate the end of source area. +kPointerSize is for the receiver.
if (callee_args_count.is_reg()) {
- Add(src_reg, StackPointer(),
- Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
+ Add(src_reg, sp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
Add(src_reg, src_reg, kPointerSize);
} else {
- Add(src_reg, StackPointer(),
- (callee_args_count.immediate() + 1) * kPointerSize);
+ Add(src_reg, sp, (callee_args_count.immediate() + 1) * kPointerSize);
}
// Round src_reg up to a multiple of 16 bytes, so we include any potential
@@ -2145,12 +2066,11 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
bind(&entry);
- Cmp(StackPointer(), src_reg);
+ Cmp(sp, src_reg);
B(ne, &loop);
// Leave current frame.
- Mov(StackPointer(), dst_reg);
- AssertStackConsistency();
+ Mov(sp, dst_reg);
}
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
@@ -2224,12 +2144,28 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
- Label skip_hook;
+ Label skip_hook, call_hook;
+
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(isolate());
+ Mov(x4, Operand(debug_is_active));
+ Ldrsb(x4, MemOperand(x4));
+ Cbz(x4, &skip_hook);
+
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
Mov(x4, Operand(debug_hook_active));
Ldrsb(x4, MemOperand(x4));
- Cbz(x4, &skip_hook);
+ Cbnz(x4, &call_hook);
+
+ Ldr(x4, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
+ Ldr(x4, FieldMemOperand(x4, SharedFunctionInfo::kDebugInfoOffset));
+ JumpIfSmi(x4, &skip_hook);
+ Ldr(x4, FieldMemOperand(x4, DebugInfo::kFlagsOffset));
+ Tst(x4, Operand(Smi::FromInt(DebugInfo::kBreakAtEntry)));
+ B(eq, &skip_hook);
+
+ bind(&call_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -2284,7 +2220,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
- Register code = x4;
+ Register code = kJavaScriptCallCodeStartRegister;
Ldr(code, FieldMemOperand(function, JSFunction::kCodeOffset));
Add(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
@@ -2343,16 +2279,6 @@ void MacroAssembler::InvokeFunction(Register function,
InvokeFunctionCode(function, no_reg, expected, actual, flag);
}
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag) {
- // Contract with called JS functions requires that function is passed in x1.
- // (See FullCodeGenerator::Generate().)
- LoadObject(x1, function);
- InvokeFunction(x1, expected, actual, flag);
-}
-
void TurboAssembler::TryConvertDoubleToInt64(Register result,
DoubleRegister double_input,
Label* done) {
@@ -2402,7 +2328,7 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
void TurboAssembler::Prologue() {
Push(lr, fp, cp, x1);
- Add(fp, StackPointer(), StandardFrameConstants::kFixedFrameSizeFromFp);
+ Add(fp, sp, StandardFrameConstants::kFixedFrameSizeFromFp);
}
void TurboAssembler::EnterFrame(StackFrame::Type type) {
@@ -2414,21 +2340,20 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
Mov(type_reg, StackFrame::TypeToMarker(type));
Mov(code_reg, Operand(CodeObject()));
Push(lr, fp, type_reg, code_reg);
- Add(fp, StackPointer(), InternalFrameConstants::kFixedFrameSizeFromFp);
+ Add(fp, sp, InternalFrameConstants::kFixedFrameSizeFromFp);
    // sp[3] : lr
    // sp[2] : fp
    // sp[1] : type
    // sp[0] : [code object]
} else if (type == StackFrame::WASM_COMPILED) {
- DCHECK(csp.Is(StackPointer()));
Mov(type_reg, StackFrame::TypeToMarker(type));
Push(lr, fp);
- Mov(fp, csp);
+ Mov(fp, sp);
Push(type_reg, padreg);
- // csp[3] : lr
- // csp[2] : fp
- // csp[1] : type
- // csp[0] : for alignment
+ // sp[3] : lr
+ // sp[2] : fp
+ // sp[1] : type
+ // sp[0] : for alignment
} else {
DCHECK_EQ(type, StackFrame::CONSTRUCT);
Mov(type_reg, StackFrame::TypeToMarker(type));
@@ -2439,8 +2364,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
// The context pointer isn't part of the fixed frame, so add an extra slot
// to account for it.
- Add(fp, StackPointer(),
- TypedFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+ Add(fp, sp, TypedFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
// sp[3] : lr
// sp[2] : fp
// sp[1] : type
@@ -2450,15 +2374,12 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
if (type == StackFrame::WASM_COMPILED) {
- DCHECK(csp.Is(StackPointer()));
- Mov(csp, fp);
- AssertStackConsistency();
+ Mov(sp, fp);
Pop(fp, lr);
} else {
// Drop the execution stack down to the frame pointer and restore
// the caller frame pointer and return address.
- Mov(StackPointer(), fp);
- AssertStackConsistency();
+ Mov(sp, fp);
Pop(fp, lr);
}
}
@@ -2493,7 +2414,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
// Set up the new stack frame.
Push(lr, fp);
- Mov(fp, StackPointer());
+ Mov(fp, sp);
Mov(scratch, StackFrame::TypeToMarker(frame_type));
Push(scratch, xzr);
Mov(scratch, Operand(CodeObject()));
@@ -2540,13 +2461,11 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
// sp[8]: Extra space reserved for caller (if extra_space != 0).
// sp -> sp[0]: Space reserved for the return address.
- DCHECK(csp.Is(StackPointer()));
-
// ExitFrame::GetStateForFramePointer expects to find the return address at
// the memory address immediately below the pointer stored in SPOffset.
// It is not safe to derive much else from SPOffset, because the size of the
// padding can vary.
- Add(scratch, csp, kXRegSize);
+ Add(scratch, sp, kXRegSize);
Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
@@ -2555,8 +2474,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
void MacroAssembler::LeaveExitFrame(bool restore_doubles,
const Register& scratch,
const Register& scratch2) {
- DCHECK(csp.Is(StackPointer()));
-
if (restore_doubles) {
ExitFrameRestoreFPRegs();
}
@@ -2582,8 +2499,7 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles,
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
// fp[...]: The rest of the frame.
- Mov(csp, fp);
- AssertStackConsistency();
+ Mov(sp, fp);
Pop(fp, lr);
}
@@ -2752,7 +2668,7 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// registers are saved. The following registers are excluded:
// - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
// the macro assembler.
- // - x31 (csp) because the system stack pointer doesn't need to be included
+ // - x31 (sp) because the system stack pointer doesn't need to be included
// in safepoint registers.
//
// This function implements the mapping of register code to index into the
@@ -3052,7 +2968,7 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
const CPURegister& arg3) {
// We cannot handle a caller-saved stack pointer. It doesn't make much sense
// in most cases anyway, so this restriction shouldn't be too serious.
- DCHECK(!kCallerSaved.IncludesAliasOf(StackPointer()));
+ DCHECK(!kCallerSaved.IncludesAliasOf(sp));
// The provided arguments, and their proper procedure-call standard registers.
CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
@@ -3164,12 +3080,6 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
Bind(&after_data);
}
- // We don't pass any arguments on the stack, but we still need to align the C
- // stack pointer to a 16-byte boundary for PCS compliance.
- if (!csp.Is(StackPointer())) {
- Bic(csp, StackPointer(), 0xF);
- }
-
CallPrintf(arg_count, pcs);
}
@@ -3208,14 +3118,6 @@ void MacroAssembler::Printf(const char * format,
CPURegister arg1,
CPURegister arg2,
CPURegister arg3) {
- // We can only print sp if it is the current stack pointer.
- if (!csp.Is(StackPointer())) {
- DCHECK(!csp.Aliases(arg0));
- DCHECK(!csp.Aliases(arg1));
- DCHECK(!csp.Aliases(arg2));
- DCHECK(!csp.Aliases(arg3));
- }
-
// Printf is expected to preserve all registers, so make sure that none are
// available as scratch registers until we've preserved them.
RegList old_tmp_list = TmpList()->list();
@@ -3224,8 +3126,8 @@ void MacroAssembler::Printf(const char * format,
FPTmpList()->set_list(0);
// Preserve all caller-saved registers as well as NZCV.
- // If csp is the stack pointer, PushCPURegList asserts that the size of each
- // list is a multiple of 16 bytes.
+ // PushCPURegList asserts that the size of each list is a multiple of 16
+ // bytes.
PushCPURegList(kCallerSaved);
PushCPURegList(kCallerSavedV);
@@ -3241,15 +3143,15 @@ void MacroAssembler::Printf(const char * format,
// If any of the arguments are the current stack pointer, allocate a new
// register for them, and adjust the value to compensate for pushing the
// caller-saved registers.
- bool arg0_sp = StackPointer().Aliases(arg0);
- bool arg1_sp = StackPointer().Aliases(arg1);
- bool arg2_sp = StackPointer().Aliases(arg2);
- bool arg3_sp = StackPointer().Aliases(arg3);
+ bool arg0_sp = sp.Aliases(arg0);
+ bool arg1_sp = sp.Aliases(arg1);
+ bool arg2_sp = sp.Aliases(arg2);
+ bool arg3_sp = sp.Aliases(arg3);
if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) {
// Allocate a register to hold the original stack pointer value, to pass
// to PrintfNoPreserve as an argument.
Register arg_sp = temps.AcquireX();
- Add(arg_sp, StackPointer(),
+ Add(arg_sp, sp,
kCallerSaved.TotalSizeInBytes() + kCallerSavedV.TotalSizeInBytes());
if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits());
if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits());
@@ -3302,7 +3204,7 @@ CPURegister UseScratchRegisterScope::AcquireNextAvailable(
CPURegList* available) {
CHECK(!available->IsEmpty());
CPURegister result = available->PopLowestIndex();
- DCHECK(!AreAliased(result, xzr, csp));
+ DCHECK(!AreAliased(result, xzr, sp));
return result;
}
@@ -3359,6 +3261,14 @@ InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
}
}
+void TurboAssembler::ComputeCodeStartAddress(const Register& rd) {
+ // We can use adr to load a pc relative location.
+ adr(rd, -pc_offset());
+}
+
+void TurboAssembler::ResetSpeculationPoisonRegister() {
+ Mov(kSpeculationPoisonRegister, -1);
+}
#undef __
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index 47c08f2622..c72cb39536 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -47,12 +47,15 @@ namespace internal {
#define kJSFunctionRegister x1
#define kContextRegister cp
#define kAllocateSizeRegister x1
+#define kSpeculationPoisonRegister x18
#define kInterpreterAccumulatorRegister x0
#define kInterpreterBytecodeOffsetRegister x19
#define kInterpreterBytecodeArrayRegister x20
#define kInterpreterDispatchTableRegister x21
#define kJavaScriptCallArgCountRegister x0
+#define kJavaScriptCallCodeStartRegister x2
#define kJavaScriptCallNewTargetRegister x3
+#define kOffHeapTrampolineRegister ip0
#define kRuntimeCallFunctionRegister x1
#define kRuntimeCallArgCountRegister x0
@@ -254,6 +257,10 @@ class TurboAssembler : public Assembler {
void Move(Register dst, Handle<HeapObject> x);
void Move(Register dst, Smi* src);
+ // Register swap. Note that the register operands should be distinct.
+ void Swap(Register lhs, Register rhs);
+ void Swap(VRegister lhs, VRegister rhs);
+
// NEON by element instructions.
#define NEON_BYELEMENT_MACRO_LIST(V) \
V(fmla, Fmla) \
@@ -549,6 +556,11 @@ class TurboAssembler : public Assembler {
void Cbnz(const Register& rt, Label* label);
void Cbz(const Register& rt, Label* label);
+ inline void Dmb(BarrierDomain domain, BarrierType type);
+ inline void Dsb(BarrierDomain domain, BarrierType type);
+ inline void Isb();
+ inline void Csdb();
+
bool AllowThisStubCall(CodeStub* stub);
void CallStubDelayed(CodeStub* stub);
void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
@@ -581,20 +593,6 @@ class TurboAssembler : public Assembler {
// Print a message to stderr and abort execution.
void Abort(AbortReason reason);
- // If emit_debug_code() is true, emit a run-time check to ensure that
- // StackPointer() does not point below the system stack pointer.
- //
- // Whilst it is architecturally legal for StackPointer() to point below csp,
- // it can be evidence of a potential bug because the ABI forbids accesses
- // below csp.
- //
- // If StackPointer() is the system stack pointer (csp), then csp will be
- // dereferenced to cause the processor (or simulator) to abort if it is not
- // properly aligned.
- //
- // If emit_debug_code() is false, this emits no code.
- void AssertStackConsistency();
-
// Remaining instructions are simple pass-through calls to the assembler.
inline void Asr(const Register& rd, const Register& rn, unsigned shift);
inline void Asr(const Register& rd, const Register& rn, const Register& rm);
@@ -614,9 +612,6 @@ class TurboAssembler : public Assembler {
static CPURegList DefaultTmpList();
static CPURegList DefaultFPTmpList();
- // Return the stack pointer.
- inline const Register& StackPointer() const { return csp; }
-
// Move macros.
inline void Mvn(const Register& rd, uint64_t imm);
void Mvn(const Register& rd, const Operand& operand);
@@ -650,9 +645,11 @@ class TurboAssembler : public Assembler {
inline void Cmp(const Register& rn, const Operand& operand);
inline void Subs(const Register& rd, const Register& rn,
const Operand& operand);
+ void Csel(const Register& rd, const Register& rn, const Operand& operand,
+ Condition cond);
- // Emits a runtime assert that the CSP is aligned.
- void AssertCspAligned();
+ // Emits a runtime assert that the stack pointer is aligned.
+ void AssertSpAligned();
// Copy slot_count stack slots from the stack offset specified by src to
// the stack offset specified by dst. The offsets and count are expressed in
@@ -687,17 +684,14 @@ class TurboAssembler : public Assembler {
// Load a literal from the inline constant pool.
inline void Ldr(const CPURegister& rt, const Operand& imm);
- // Helper function for double immediate.
- inline void Ldr(const CPURegister& rt, double imm);
// Claim or drop stack space without actually accessing memory.
//
// In debug mode, both of these will write invalid data into the claimed or
// dropped space.
//
- // If the current stack pointer (according to StackPointer()) is csp, then it
- // must be aligned to 16 bytes and the size claimed or dropped must be a
- // multiple of 16 bytes.
+ // The stack pointer must be aligned to 16 bytes and the size claimed or
+ // dropped must be a multiple of 16 bytes.
//
// Note that unit_size must be specified in bytes. For variants which take a
// Register count, the unit size must be a power of two.
@@ -724,26 +718,6 @@ class TurboAssembler : public Assembler {
// Push a single argument, with padding, to the stack.
inline void PushArgument(const Register& arg);
- // Re-synchronizes the system stack pointer (csp) with the current stack
- // pointer (according to StackPointer()).
- //
- // This method asserts that StackPointer() is not csp, since the call does
- // not make sense in that context.
- inline void SyncSystemStackPointer();
-
- // Push the system stack pointer (csp) down to allow the same to be done to
- // the current stack pointer (according to StackPointer()). This must be
- // called _before_ accessing the memory.
- //
- // This is necessary when pushing or otherwise adding things to the stack, to
- // satisfy the AAPCS64 constraint that the memory below the system stack
- // pointer is not accessed. The amount pushed will be increased as necessary
- // to ensure csp remains aligned to 16 bytes.
- //
- // This method asserts that StackPointer() is not csp, since the call does
- // not make sense in that context.
- inline void BumpSystemStackPointer(const Operand& space);
-
// Add and sub macros.
inline void Add(const Register& rd, const Register& rn,
const Operand& operand);
@@ -778,11 +752,6 @@ class TurboAssembler : public Assembler {
// The stack pointer must be aligned to 16 bytes on entry and the total size
// of the specified registers must also be a multiple of 16 bytes.
//
- // Even if the current stack pointer is not the system stack pointer (csp),
- // Push (and derived methods) will still modify the system stack pointer in
- // order to comply with ABI rules about accessing memory below the system
- // stack pointer.
- //
// Other than the registers passed into Pop, the stack pointer and (possibly)
// the system stack pointer, these methods do not modify any other registers.
void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
@@ -1011,17 +980,13 @@ class TurboAssembler : public Assembler {
inline void Clz(const Register& rd, const Register& rn);
- // Poke 'src' onto the stack. The offset is in bytes.
- //
- // If the current stack pointer (according to StackPointer()) is csp, then
- // csp must be aligned to 16 bytes.
+ // Poke 'src' onto the stack. The offset is in bytes. The stack pointer must
+ // be 16 byte aligned.
void Poke(const CPURegister& src, const Operand& offset);
// Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
- // with 'src2' at a higher address than 'src1'. The offset is in bytes.
- //
- // If the current stack pointer (according to StackPointer()) is csp, then
- // csp must be aligned to 16 bytes.
+ // with 'src2' at a higher address than 'src1'. The offset is in bytes. The
+ // stack pointer must be 16 byte aligned.
void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);
inline void Sbfx(const Register& rd, const Register& rn, unsigned lsb,
@@ -1047,7 +1012,9 @@ class TurboAssembler : public Assembler {
void CanonicalizeNaN(const VRegister& dst, const VRegister& src);
void CanonicalizeNaN(const VRegister& reg) { CanonicalizeNaN(reg, reg); }
+ inline void CmovX(const Register& rd, const Register& rn, Condition cond);
inline void Cset(const Register& rd, Condition cond);
+ inline void Csetm(const Register& rd, Condition cond);
inline void Fccmp(const VRegister& fn, const VRegister& fm, StatusFlags nzcv,
Condition cond);
inline void Csinc(const Register& rd, const Register& rn, const Register& rm,
@@ -1233,6 +1200,12 @@ class TurboAssembler : public Assembler {
inline void Fcvtas(const Register& rd, const VRegister& fn);
inline void Fcvtau(const Register& rd, const VRegister& fn);
+ // Compute the start of the generated instruction stream from the current PC.
+ // This is an alternative to embedding the {CodeObject} handle as a reference.
+ void ComputeCodeStartAddress(const Register& rd);
+
+ void ResetSpeculationPoisonRegister();
+
protected:
// The actual Push and Pop implementations. These don't generate any code
// other than that required for the push or pop. This allows
@@ -1257,8 +1230,8 @@ class TurboAssembler : public Assembler {
// Call Printf. On a native build, a simple call will be generated, but if the
// simulator is being used then a suitable pseudo-instruction is used. The
- // arguments and stack (csp) must be prepared by the caller as for a normal
- // AAPCS64 call to 'printf'.
+ // arguments and stack must be prepared by the caller as for a normal AAPCS64
+ // call to 'printf'.
//
// The 'args' argument should point to an array of variable arguments in their
// proper PCS registers (and in calling order). The argument registers can
@@ -1326,8 +1299,6 @@ class MacroAssembler : public TurboAssembler {
inline void Ccmn(const Register& rn, const Operand& operand, StatusFlags nzcv,
Condition cond);
- void Csel(const Register& rd, const Register& rn, const Operand& operand,
- Condition cond);
#define DECLARE_FUNCTION(FN, OP) \
inline void FN(const Register& rs, const Register& rt, const Register& rn);
@@ -1344,14 +1315,10 @@ class MacroAssembler : public TurboAssembler {
inline void Cinc(const Register& rd, const Register& rn, Condition cond);
inline void Cinv(const Register& rd, const Register& rn, Condition cond);
inline void CzeroX(const Register& rd, Condition cond);
- inline void CmovX(const Register& rd, const Register& rn, Condition cond);
- inline void Csetm(const Register& rd, Condition cond);
inline void Csinv(const Register& rd, const Register& rn, const Register& rm,
Condition cond);
inline void Csneg(const Register& rd, const Register& rn, const Register& rm,
Condition cond);
- inline void Dmb(BarrierDomain domain, BarrierType type);
- inline void Dsb(BarrierDomain domain, BarrierType type);
inline void Extr(const Register& rd, const Register& rn, const Register& rm,
unsigned lsb);
inline void Fcsel(const VRegister& fd, const VRegister& fn,
@@ -1394,7 +1361,6 @@ class MacroAssembler : public TurboAssembler {
const VRegister& fm, const VRegister& fa);
inline void Hint(SystemHint code);
inline void Hlt(int code);
- inline void Isb();
inline void Ldnp(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& src);
inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
@@ -1641,17 +1607,13 @@ class MacroAssembler : public TurboAssembler {
};
// Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
- //
- // If the current stack pointer (according to StackPointer()) is csp, then
- // csp must be aligned to 16 bytes.
+ // The stack pointer must be aligned to 16 bytes.
void Peek(const CPURegister& dst, const Operand& offset);
// Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
// values peeked will be adjacent, with the value in 'dst2' being from a
- // higher address than 'dst1'. The offset is in bytes.
- //
- // If the current stack pointer (according to StackPointer()) is csp, then
- // csp must be aligned to 16 bytes.
+ // higher address than 'dst1'. The offset is in bytes. The stack pointer must
+ // be aligned to 16 bytes.
void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
// Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
@@ -1704,10 +1666,6 @@ class MacroAssembler : public TurboAssembler {
// thus come from higher addresses.
void PopCalleeSavedRegisters();
- // Align csp for a frame, as per ActivationFrameAlignment, and make it the
- // current stack pointer.
- inline void AlignAndSetCSPForFrame();
-
// Helpers ------------------------------------------------------------------
static int SafepointRegisterStackIndex(int reg_code);
@@ -1770,11 +1728,6 @@ class MacroAssembler : public TurboAssembler {
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object);
- void JumpIfHeapNumber(Register object, Label* on_heap_number,
- SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
- void JumpIfNotHeapNumber(Register object, Label* on_not_heap_number,
- SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
-
// Try to represent a double as a signed 64-bit int.
// This succeeds if the result compares equal to the input, so inputs of -0.0
// are represented as 0 and handled as a success.
@@ -1817,6 +1770,9 @@ class MacroAssembler : public TurboAssembler {
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
+ // Generates a trampoline to jump to the off-heap instruction stream.
+ void JumpToInstructionStream(const InstructionStream* stream);
+
// Registers used through the invocation chain are hard-coded.
// We force passing the parameters to ensure the contracts are correctly
// honoured by the caller.
@@ -1841,9 +1797,6 @@ class MacroAssembler : public TurboAssembler {
const ParameterCount& actual, InvokeFlag flag);
void InvokeFunction(Register function, const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
- void InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag);
// ---- Code generation helpers ----
@@ -1940,12 +1893,12 @@ class MacroAssembler : public TurboAssembler {
// Set up a stack frame and registers as follows:
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
- // fp[-8]: SPOffset (new csp)
+ // fp[-8]: SPOffset (new sp)
// fp[-16]: CodeObject()
// fp[-16 - fp-size]: Saved doubles, if saved_doubles is true.
- // csp[8]: Memory reserved for the caller if extra_space != 0.
+ // sp[8]: Memory reserved for the caller if extra_space != 0.
// Alignment padding, if necessary.
- // csp -> csp[0]: Space reserved for the return address.
+ // sp -> sp[0]: Space reserved for the return address.
//
// This function also stores the new frame information in the top frame, so
// that the new frame becomes the current frame.
@@ -1960,8 +1913,6 @@ class MacroAssembler : public TurboAssembler {
// * Preserved doubles are restored (if restore_doubles is true).
// * The frame information is removed from the top frame.
// * The exit frame is dropped.
- //
- // The stack pointer must be csp on entry.
void LeaveExitFrame(bool save_doubles, const Register& scratch,
const Register& scratch2);
@@ -2030,11 +1981,6 @@ class MacroAssembler : public TurboAssembler {
// (such as %e, %f or %g) are VRegisters, and that arguments for integer
// placeholders are Registers.
//
- // At the moment it is only possible to print the value of csp if it is the
- // current stack pointer. Otherwise, the MacroAssembler will automatically
- // update csp on every push (using BumpSystemStackPointer), so determining its
- // value is difficult.
- //
// Format placeholders that refer to more than one argument, or to a specific
// argument, are not supported. This includes formats like "%1$d" or "%.*d".
//
@@ -2169,6 +2115,7 @@ class UseScratchRegisterScope {
Register AcquireX() { return AcquireNextAvailable(available_).X(); }
VRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
VRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
+ VRegister AcquireQ() { return AcquireNextAvailable(availablefp_).Q(); }
VRegister AcquireV(VectorFormat format) {
return VRegister::Create(AcquireNextAvailable(availablefp_).code(), format);
}
@@ -2210,7 +2157,7 @@ class InlineSmiCheckInfo {
// Use MacroAssembler::InlineData to emit information about patchable inline
// SMI checks. The caller may specify 'reg' as NoReg and an unbound 'site' to
- // indicate that there is no inline SMI check. Note that 'reg' cannot be csp.
+ // indicate that there is no inline SMI check. Note that 'reg' cannot be sp.
//
// The generated patch information can be read using the InlineSMICheckInfo
// class.
@@ -2230,8 +2177,8 @@ class InlineSmiCheckInfo {
// Fields in the data encoded by InlineData.
- // A width of 5 (Rd_width) for the SMI register preclues the use of csp,
- // since kSPRegInternalCode is 63. However, csp should never hold a SMI or be
+ // A width of 5 (Rd_width) for the SMI register precludes the use of sp,
+ // since kSPRegInternalCode is 63. However, sp should never hold a SMI or be
// used in a patchable check. The Emit() method checks this.
//
// Note that the total size of the fields is restricted by the underlying
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
index d0c464dfbe..5c72cf1c90 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -626,16 +626,15 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
}
const char* Simulator::xreg_names[] = {
- "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8",
- "x9", "x10", "x11", "x12", "x13", "x14", "x15", "ip0", "ip1",
- "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26",
- "cp", "x28", "fp", "lr", "xzr", "csp"};
+ "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10",
+ "x11", "x12", "x13", "x14", "x15", "ip0", "ip1", "x18", "x19", "x20", "x21",
+ "x22", "x23", "x24", "x25", "x26", "cp", "x28", "fp", "lr", "xzr", "sp"};
const char* Simulator::wreg_names[] = {
"w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8",
"w9", "w10", "w11", "w12", "w13", "w14", "w15", "w16", "w17",
"w18", "w19", "w20", "w21", "w22", "w23", "w24", "w25", "w26",
- "wcp", "w28", "wfp", "wlr", "wzr", "wcsp"};
+ "wcp", "w28", "wfp", "wlr", "wzr", "wsp"};
const char* Simulator::sreg_names[] = {
"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
@@ -768,7 +767,7 @@ int Simulator::CodeFromName(const char* name) {
return i;
}
}
- if ((strcmp("csp", name) == 0) || (strcmp("wcsp", name) == 0)) {
+ if ((strcmp("sp", name) == 0) || (strcmp("wsp", name) == 0)) {
return kSPRegInternalCode;
}
return -1;
@@ -1450,7 +1449,7 @@ void Simulator::VisitUnconditionalBranch(Instruction* instr) {
switch (instr->Mask(UnconditionalBranchMask)) {
case BL:
set_lr(instr->following());
- // Fall through.
+ V8_FALLTHROUGH;
case B:
set_pc(instr->ImmPCOffsetTarget());
break;
@@ -1478,7 +1477,7 @@ void Simulator::VisitUnconditionalBranchToRegister(Instruction* instr) {
// this, but if we do trap to allow debugging.
Debug();
}
- // Fall through.
+ V8_FALLTHROUGH;
}
case BR:
case RET: set_pc(target); break;
@@ -1630,7 +1629,7 @@ void Simulator::LogicalHelper(Instruction* instr, T op2) {
// Switch on the logical operation, stripping out the NOT bit, as it has a
// different meaning for logical immediate instructions.
switch (instr->Mask(LogicalOpMask & ~NOT)) {
- case ANDS: update_flags = true; // Fall through.
+ case ANDS: update_flags = true; V8_FALLTHROUGH;
case AND: result = op1 & op2; break;
case ORR: result = op1 | op2; break;
case EOR: result = op1 ^ op2; break;
@@ -2956,7 +2955,9 @@ void Simulator::VisitSystem(Instruction* instr) {
} else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
DCHECK(instr->Mask(SystemHintMask) == HINT);
switch (instr->ImmHint()) {
- case NOP: break;
+ case NOP:
+ case CSDB:
+ break;
default: UNIMPLEMENTED();
}
} else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
@@ -2996,15 +2997,15 @@ bool Simulator::GetValue(const char* desc, int64_t* value) {
bool Simulator::PrintValue(const char* desc) {
- if (strcmp(desc, "csp") == 0) {
+ if (strcmp(desc, "sp") == 0) {
DCHECK(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
- PrintF(stream_, "%s csp:%s 0x%016" PRIx64 "%s\n",
- clr_reg_name, clr_reg_value, xreg(31, Reg31IsStackPointer), clr_normal);
+ PrintF(stream_, "%s sp:%s 0x%016" PRIx64 "%s\n", clr_reg_name,
+ clr_reg_value, xreg(31, Reg31IsStackPointer), clr_normal);
return true;
- } else if (strcmp(desc, "wcsp") == 0) {
+ } else if (strcmp(desc, "wsp") == 0) {
DCHECK(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
- PrintF(stream_, "%s wcsp:%s 0x%08" PRIx32 "%s\n",
- clr_reg_name, clr_reg_value, wreg(31, Reg31IsStackPointer), clr_normal);
+ PrintF(stream_, "%s wsp:%s 0x%08" PRIx32 "%s\n", clr_reg_name,
+ clr_reg_value, wreg(31, Reg31IsStackPointer), clr_normal);
return true;
}
@@ -4396,15 +4397,18 @@ void Simulator::NEONLoadStoreMultiStructHelper(const Instruction* instr,
case NEON_LD1_4v:
case NEON_LD1_4v_post:
ld1(vf, vreg(reg[3]), addr[3]);
- count++; // Fall through.
+ count++;
+ V8_FALLTHROUGH;
case NEON_LD1_3v:
case NEON_LD1_3v_post:
ld1(vf, vreg(reg[2]), addr[2]);
- count++; // Fall through.
+ count++;
+ V8_FALLTHROUGH;
case NEON_LD1_2v:
case NEON_LD1_2v_post:
ld1(vf, vreg(reg[1]), addr[1]);
- count++; // Fall through.
+ count++;
+ V8_FALLTHROUGH;
case NEON_LD1_1v:
case NEON_LD1_1v_post:
ld1(vf, vreg(reg[0]), addr[0]);
@@ -4412,15 +4416,18 @@ void Simulator::NEONLoadStoreMultiStructHelper(const Instruction* instr,
case NEON_ST1_4v:
case NEON_ST1_4v_post:
st1(vf, vreg(reg[3]), addr[3]);
- count++; // Fall through.
+ count++;
+ V8_FALLTHROUGH;
case NEON_ST1_3v:
case NEON_ST1_3v_post:
st1(vf, vreg(reg[2]), addr[2]);
- count++; // Fall through.
+ count++;
+ V8_FALLTHROUGH;
case NEON_ST1_2v:
case NEON_ST1_2v_post:
st1(vf, vreg(reg[1]), addr[1]);
- count++; // Fall through.
+ count++;
+ V8_FALLTHROUGH;
case NEON_ST1_1v:
case NEON_ST1_1v_post:
st1(vf, vreg(reg[0]), addr[0]);
@@ -4533,7 +4540,8 @@ void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
case NEON_LD3_b_post:
case NEON_LD4_b:
case NEON_LD4_b_post:
- do_load = true; // Fall through.
+ do_load = true;
+ V8_FALLTHROUGH;
case NEON_ST1_b:
case NEON_ST1_b_post:
case NEON_ST2_b:
@@ -4552,7 +4560,8 @@ void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
case NEON_LD3_h_post:
case NEON_LD4_h:
case NEON_LD4_h_post:
- do_load = true; // Fall through.
+ do_load = true;
+ V8_FALLTHROUGH;
case NEON_ST1_h:
case NEON_ST1_h_post:
case NEON_ST2_h:
@@ -4572,7 +4581,8 @@ void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
case NEON_LD3_s_post:
case NEON_LD4_s:
case NEON_LD4_s_post:
- do_load = true; // Fall through.
+ do_load = true;
+ V8_FALLTHROUGH;
case NEON_ST1_s:
case NEON_ST1_s_post:
case NEON_ST2_s:
diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h
index a8f229d764..18fa4d44ec 100644
--- a/deps/v8/src/arm64/simulator-arm64.h
+++ b/deps/v8/src/arm64/simulator-arm64.h
@@ -646,6 +646,7 @@ class LogicVRegister {
class Simulator : public DecoderVisitor, public SimulatorBase {
public:
static void SetRedirectInstruction(Instruction* instruction);
+ static bool ICacheMatch(void* one, void* two) { return false; }
static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
size_t size) {
USE(i_cache);
diff --git a/deps/v8/src/asmjs/OWNERS b/deps/v8/src/asmjs/OWNERS
index 8cca6813f2..e8a7f1683b 100644
--- a/deps/v8/src/asmjs/OWNERS
+++ b/deps/v8/src/asmjs/OWNERS
@@ -3,9 +3,7 @@ set noparent
ahaas@chromium.org
bradnelson@chromium.org
clemensh@chromium.org
-mtrofin@chromium.org
mstarzinger@chromium.org
-rossberg@chromium.org
titzer@chromium.org
# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc
index 5e3d0d0c2a..fc56b7e23a 100644
--- a/deps/v8/src/asmjs/asm-js.cc
+++ b/deps/v8/src/asmjs/asm-js.cc
@@ -21,8 +21,7 @@
#include "src/parsing/scanner-character-streams.h"
#include "src/parsing/scanner.h"
-#include "src/wasm/module-compiler.h"
-#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -66,18 +65,21 @@ bool AreStdlibMembersValid(Isolate* isolate, Handle<JSReceiver> stdlib,
Handle<Object> value = JSReceiver::GetDataProperty(stdlib, name);
if (!value->IsNaN()) return false;
}
-#define STDLIB_MATH_FUNC(fname, FName, ignore1, ignore2) \
- if (members.Contains(wasm::AsmJsParser::StandardMember::kMath##FName)) { \
- members.Remove(wasm::AsmJsParser::StandardMember::kMath##FName); \
- Handle<Name> name(isolate->factory()->InternalizeOneByteString( \
- STATIC_CHAR_VECTOR(#fname))); \
- Handle<Object> value = StdlibMathMember(isolate, stdlib, name); \
- if (!value->IsJSFunction()) return false; \
- Handle<JSFunction> func = Handle<JSFunction>::cast(value); \
- if (func->shared()->code() != \
- isolate->builtins()->builtin(Builtins::kMath##FName)) { \
- return false; \
- } \
+#define STDLIB_MATH_FUNC(fname, FName, ignore1, ignore2) \
+ if (members.Contains(wasm::AsmJsParser::StandardMember::kMath##FName)) { \
+ members.Remove(wasm::AsmJsParser::StandardMember::kMath##FName); \
+ Handle<Name> name(isolate->factory()->InternalizeOneByteString( \
+ STATIC_CHAR_VECTOR(#fname))); \
+ Handle<Object> value = StdlibMathMember(isolate, stdlib, name); \
+ if (!value->IsJSFunction()) return false; \
+ SharedFunctionInfo* shared = Handle<JSFunction>::cast(value)->shared(); \
+ if (shared->HasLazyDeserializationBuiltinId()) { \
+ if (shared->lazy_deserialization_builtin_id() != Builtins::kMath##FName) \
+ return false; \
+ } else if (shared->code() != \
+ isolate->builtins()->builtin(Builtins::kMath##FName)) { \
+ return false; \
+ } \
}
STDLIB_MATH_FUNCTION_LIST(STDLIB_MATH_FUNC)
#undef STDLIB_MATH_FUNC
@@ -284,11 +286,12 @@ CompilationJob::Status AsmJsCompilationJob::FinalizeJobImpl(Isolate* isolate) {
wasm::ErrorThrower thrower(isolate, "AsmJs::Compile");
Handle<WasmModuleObject> compiled =
- SyncCompileTranslatedAsmJs(
- isolate, &thrower,
- wasm::ModuleWireBytes(module_->begin(), module_->end()),
- parse_info()->script(),
- Vector<const byte>(asm_offsets_->begin(), asm_offsets_->size()))
+ isolate->wasm_engine()
+ ->SyncCompileTranslatedAsmJs(
+ isolate, &thrower,
+ wasm::ModuleWireBytes(module_->begin(), module_->end()),
+ parse_info()->script(),
+ Vector<const byte>(asm_offsets_->begin(), asm_offsets_->size()))
.ToHandleChecked();
DCHECK(!thrower.error());
compile_time_ = compile_timer.Elapsed().InMillisecondsF();
@@ -389,7 +392,8 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
wasm::ErrorThrower thrower(isolate, "AsmJs::Instantiate");
MaybeHandle<Object> maybe_module_object =
- wasm::SyncInstantiate(isolate, &thrower, module, foreign, memory);
+ isolate->wasm_engine()->SyncInstantiate(isolate, &thrower, module,
+ foreign, memory);
if (maybe_module_object.is_null()) {
// An exception caused by the module start function will be set as pending
// and bypass the {ErrorThrower}, this happens in case of a stack overflow.
diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc
index 6be80bf7af..f210b42a62 100644
--- a/deps/v8/src/asmjs/asm-parser.cc
+++ b/deps/v8/src/asmjs/asm-parser.cc
@@ -745,6 +745,12 @@ void AsmJsParser::ValidateFunction() {
CachedVector<AsmType*> params(cached_asm_type_p_vectors_);
ValidateFunctionParams(&params);
+
+ // Check against limit on number of parameters.
+ if (params.size() >= kV8MaxWasmFunctionParams) {
+ FAIL("Number of parameters exceeds internal limit");
+ }
+
CachedVector<ValueType> locals(cached_valuetype_vectors_);
ValidateFunctionLocals(params.size(), &locals);
diff --git a/deps/v8/src/asmjs/switch-logic.h b/deps/v8/src/asmjs/switch-logic.h
index 3ef34d9461..f770ddc33d 100644
--- a/deps/v8/src/asmjs/switch-logic.h
+++ b/deps/v8/src/asmjs/switch-logic.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ASMJS_SWITCH_LOGIC_H
-#define V8_ASMJS_SWITCH_LOGIC_H
+#ifndef V8_ASMJS_SWITCH_LOGIC_H_
+#define V8_ASMJS_SWITCH_LOGIC_H_
#include "src/globals.h"
#include "src/zone/zone-containers.h"
@@ -30,4 +30,4 @@ V8_EXPORT_PRIVATE CaseNode* OrderCases(ZoneVector<int>* cases, Zone* zone);
} // namespace internal
} // namespace v8
-#endif // V8_ASMJS_SWITCH_LOGIC_H
+#endif // V8_ASMJS_SWITCH_LOGIC_H_
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 1b83735bc9..c566f35acb 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -176,12 +176,12 @@ AssemblerBase::~AssemblerBase() {
if (own_buffer_) DeleteArray(buffer_);
}
-void AssemblerBase::FlushICache(Isolate* isolate, void* start, size_t size) {
+void AssemblerBase::FlushICache(void* start, size_t size) {
if (size == 0) return;
#if defined(USE_SIMULATOR)
- base::LockGuard<base::Mutex> lock_guard(isolate->simulator_i_cache_mutex());
- Simulator::FlushICache(isolate->simulator_i_cache(), start, size);
+ base::LockGuard<base::Mutex> lock_guard(Simulator::i_cache_mutex());
+ Simulator::FlushICache(Simulator::i_cache(), start, size);
#else
CpuFeatures::FlushICache(start, size);
#endif // USE_SIMULATOR
@@ -195,9 +195,6 @@ void AssemblerBase::Print(Isolate* isolate) {
// -----------------------------------------------------------------------------
// Implementation of PredictableCodeSizeScope
-PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler)
- : PredictableCodeSizeScope(assembler, -1) {}
-
PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler,
int expected_size)
: assembler_(assembler),
@@ -208,10 +205,7 @@ PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler,
}
PredictableCodeSizeScope::~PredictableCodeSizeScope() {
- // TODO(svenpanne) Remove the 'if' when everything works.
- if (expected_size_ >= 0) {
- CHECK_EQ(expected_size_, assembler_->pc_offset() - start_offset_);
- }
+ CHECK_EQ(expected_size_, assembler_->pc_offset() - start_offset_);
assembler_->set_predictable_code_size(old_value_);
}
@@ -301,16 +295,16 @@ const int kLastChunkTagBits = 1;
const int kLastChunkTagMask = 1;
const int kLastChunkTag = 1;
-void RelocInfo::set_wasm_context_reference(Isolate* isolate, Address address,
+void RelocInfo::set_wasm_context_reference(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsWasmContextReference(rmode_));
- set_embedded_address(isolate, address, icache_flush_mode);
+ set_embedded_address(address, icache_flush_mode);
}
-void RelocInfo::set_global_handle(Isolate* isolate, Address address,
+void RelocInfo::set_global_handle(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, WASM_GLOBAL_HANDLE);
- set_embedded_address(isolate, address, icache_flush_mode);
+ set_embedded_address(address, icache_flush_mode);
}
Address RelocInfo::wasm_call_address() const {
@@ -318,10 +312,10 @@ Address RelocInfo::wasm_call_address() const {
return Assembler::target_address_at(pc_, constant_pool_);
}
-void RelocInfo::set_wasm_call_address(Isolate* isolate, Address address,
+void RelocInfo::set_wasm_call_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, WASM_CALL);
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
+ Assembler::set_target_address_at(pc_, constant_pool_, address,
icache_flush_mode);
}
@@ -341,17 +335,16 @@ Address RelocInfo::wasm_context_reference() const {
}
void RelocInfo::update_wasm_function_table_size_reference(
- Isolate* isolate, uint32_t old_size, uint32_t new_size,
- ICacheFlushMode icache_flush_mode) {
+ uint32_t old_size, uint32_t new_size, ICacheFlushMode icache_flush_mode) {
DCHECK(IsWasmFunctionTableSizeReference(rmode_));
- set_embedded_size(isolate, new_size, icache_flush_mode);
+ set_embedded_size(new_size, icache_flush_mode);
}
-void RelocInfo::set_target_address(Isolate* isolate, Address target,
+void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, target,
+ Assembler::set_target_address_at(pc_, constant_pool_, target,
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr &&
IsCodeTarget(rmode_)) {
@@ -449,7 +442,6 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
}
}
last_pc_ = rinfo->pc();
- last_mode_ = rmode;
#ifdef DEBUG
DCHECK_LE(begin_pos - pos_, kMaxSize);
#endif
@@ -561,7 +553,8 @@ void RelocIterator::next() {
done_ = true;
}
-RelocIterator::RelocIterator(Code* code, int mode_mask) {
+RelocIterator::RelocIterator(Code* code, int mode_mask)
+ : mode_mask_(mode_mask) {
rinfo_.host_ = code;
rinfo_.pc_ = code->instruction_start();
rinfo_.data_ = 0;
@@ -569,35 +562,30 @@ RelocIterator::RelocIterator(Code* code, int mode_mask) {
// Relocation info is read backwards.
pos_ = code->relocation_start() + code->relocation_size();
end_ = code->relocation_start();
- done_ = false;
- mode_mask_ = mode_mask;
if (mode_mask_ == 0) pos_ = end_;
next();
}
-RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
+RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask)
+ : mode_mask_(mode_mask) {
rinfo_.pc_ = desc.buffer;
- rinfo_.data_ = 0;
// Relocation info is read backwards.
pos_ = desc.buffer + desc.buffer_size;
end_ = pos_ - desc.reloc_size;
- done_ = false;
- mode_mask_ = mode_mask;
if (mode_mask_ == 0) pos_ = end_;
next();
}
RelocIterator::RelocIterator(Vector<byte> instructions,
Vector<const byte> reloc_info, Address const_pool,
- int mode_mask) {
+ int mode_mask)
+ : mode_mask_(mode_mask) {
rinfo_.pc_ = instructions.start();
- rinfo_.data_ = 0;
rinfo_.constant_pool_ = const_pool;
+ rinfo_.flags_ = RelocInfo::kInNativeWasmCode;
// Relocation info is read backwards.
pos_ = reloc_info.start() + reloc_info.size();
end_ = reloc_info.start();
- done_ = false;
- mode_mask_ = mode_mask;
if (mode_mask_ == 0) pos_ = end_;
next();
}
@@ -606,7 +594,7 @@ RelocIterator::RelocIterator(Vector<byte> instructions,
// Implementation of RelocInfo
#ifdef DEBUG
-bool RelocInfo::RequiresRelocation(Isolate* isolate, const CodeDesc& desc) {
+bool RelocInfo::RequiresRelocation(const CodeDesc& desc) {
// Ensure there are no code targets or embedded objects present in the
// deoptimization entries, they would require relocation after code
// generation.
@@ -621,10 +609,8 @@ bool RelocInfo::RequiresRelocation(Isolate* isolate, const CodeDesc& desc) {
#ifdef ENABLE_DISASSEMBLER
const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
switch (rmode) {
- case NONE32:
- return "no reloc 32";
- case NONE64:
- return "no reloc 64";
+ case NONE:
+ return "no reloc";
case EMBEDDED_OBJECT:
return "embedded object";
case CODE_TARGET:
@@ -686,9 +672,21 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
<< ") (" << static_cast<const void*>(target_external_reference())
<< ")";
} else if (IsCodeTarget(rmode_)) {
- Code* code = Code::GetCodeFromTargetAddress(target_address());
- os << " (" << Code::Kind2String(code->kind()) << ") ("
- << static_cast<const void*>(target_address()) << ")";
+ const Address code_target = target_address();
+ if (flags_ & kInNativeWasmCode) {
+ os << " (wasm trampoline) ";
+ } else {
+ Code* code = Code::GetCodeFromTargetAddress(code_target);
+ DCHECK(code->IsCode());
+ os << " (" << Code::Kind2String(code->kind());
+ if (Builtins::IsBuiltin(code)) {
+ os << " " << Builtins::name(code->builtin_index());
+ } else if (code->kind() == Code::STUB) {
+ os << " " << CodeStub::MajorName(CodeStub::GetMajorKey(code));
+ }
+ os << ") ";
+ }
+ os << " (" << static_cast<const void*>(target_address()) << ")";
} else if (IsRuntimeEntry(rmode_) && isolate->deoptimizer_data() != nullptr) {
    // Deoptimization bailouts are stored as runtime entries.
int id = Deoptimizer::GetDeoptimizationId(
@@ -744,8 +742,7 @@ void RelocInfo::Verify(Isolate* isolate) {
case WASM_GLOBAL_HANDLE:
case WASM_CALL:
case JS_TO_WASM_CALL:
- case NONE32:
- case NONE64:
+ case NONE:
break;
case NUMBER_OF_MODES:
case PC_JUMP:
@@ -1465,6 +1462,12 @@ ExternalReference ExternalReference::copy_typed_array_elements_to_typed_array(
Redirect(isolate, FUNCTION_ADDR(CopyTypedArrayElementsToTypedArray)));
}
+ExternalReference ExternalReference::copy_typed_array_elements_slice(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(CopyTypedArrayElementsSlice)));
+}
+
ExternalReference ExternalReference::try_internalize_string_function(
Isolate* isolate) {
return ExternalReference(Redirect(
@@ -1877,22 +1880,5 @@ void Assembler::RequestHeapObject(HeapObjectRequest request) {
heap_object_requests_.push_front(request);
}
-namespace {
-int caller_saved_codes[kNumJSCallerSaved];
-}
-
-void SetUpJSCallerSavedCodeData() {
- int i = 0;
- for (int r = 0; r < kNumRegs; r++)
- if ((kJSCallerSaved & (1 << r)) != 0) caller_saved_codes[i++] = r;
-
- DCHECK_EQ(i, kNumJSCallerSaved);
-}
-
-int JSCallerSavedCode(int n) {
- DCHECK(0 <= n && n < kNumJSCallerSaved);
- return caller_saved_codes[n];
-}
-
} // namespace internal
} // namespace v8
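
A usage sketch for the RelocIterator change above: the mode mask now lives in the member initializer list and RelocInfo::ModeMask() is constexpr, so a mask built from it is a compile-time constant. A minimal sketch, assuming the usual v8::internal headers and a valid Code* obtained elsewhere (PrintCodeTargets is a hypothetical helper, not part of this patch):

#include <ostream>
#include "src/assembler.h"

namespace v8 {
namespace internal {

// Walk only the code-target relocations of a Code object.
void PrintCodeTargets(Code* code, std::ostream& os) {
  // ModeMask() is constexpr after this change, so the mask folds at compile time.
  constexpr int kMask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
  for (RelocIterator it(code, kMask); !it.done(); it.next()) {
    os << static_cast<const void*>(it.rinfo()->pc()) << "\n";
  }
}

}  // namespace internal
}  // namespace v8
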
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 0cebdbc2d7..c45ec6910d 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -57,15 +57,12 @@ class ApiFunction;
namespace internal {
// Forward declarations.
+class InstructionStream;
class Isolate;
+class SCTableReference;
class SourcePosition;
class StatsCounter;
-void SetUpJSCallerSavedCodeData();
-
-// Return the code of the n-th saved register available to JavaScript.
-int JSCallerSavedCode(int n);
-
// -----------------------------------------------------------------------------
// Optimization for far-jmp like instructions that can be replaced by shorter.
@@ -162,7 +159,7 @@ class AssemblerBase: public Malloced {
static const int kMinimalBufferSize = 4*KB;
- static void FlushICache(Isolate* isolate, void* start, size_t size);
+ static void FlushICache(void* start, size_t size);
protected:
// The buffer into which code and relocation info are generated. It could
@@ -220,16 +217,14 @@ class DontEmitDebugCodeScope BASE_EMBEDDED {
// snapshot and the running VM.
class PredictableCodeSizeScope {
public:
- explicit PredictableCodeSizeScope(AssemblerBase* assembler);
PredictableCodeSizeScope(AssemblerBase* assembler, int expected_size);
~PredictableCodeSizeScope();
- void ExpectSize(int expected_size) { expected_size_ = expected_size; }
private:
- AssemblerBase* assembler_;
- int expected_size_;
- int start_offset_;
- bool old_value_;
+ AssemblerBase* const assembler_;
+ int const expected_size_;
+ int const start_offset_;
+ bool const old_value_;
};
@@ -252,6 +247,8 @@ class CpuFeatureScope BASE_EMBEDDED {
#else
CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
CheckPolicy check = kCheckSupported) {}
+ // Define a destructor to avoid unused variable warnings.
+ ~CpuFeatureScope() {}
#endif
};
@@ -283,7 +280,7 @@ class CpuFeatures : public AllStatic {
return (supported_ & (1u << f)) != 0;
}
- static inline bool SupportsCrankshaft();
+ static inline bool SupportsOptimizer();
static inline bool SupportsWasmSimd128();
@@ -341,6 +338,12 @@ enum ICacheFlushMode { FLUSH_ICACHE_IF_NEEDED, SKIP_ICACHE_FLUSH };
class RelocInfo {
public:
+ enum Flag : uint8_t {
+ kNoFlags = 0,
+ kInNativeWasmCode = 1u << 0, // Reloc info belongs to native wasm code.
+ };
+ typedef base::Flags<Flag> Flags;
+
// This string is used to add padding comments to the reloc info in cases
// where we are not sure to have enough space for patching in during
// lazy deoptimization. This is the case if we have indirect calls for which
@@ -357,7 +360,7 @@ class RelocInfo {
// The maximum pc delta that will use the short encoding.
static const int kMaxSmallPCDelta;
- enum Mode {
+ enum Mode : int8_t {
// Please note the order is important (see IsCodeTarget, IsGCRelocMode).
CODE_TARGET,
EMBEDDED_OBJECT,
@@ -395,8 +398,7 @@ class RelocInfo {
// Pseudo-types
NUMBER_OF_MODES,
- NONE32, // never recorded 32-bit value
- NONE64, // never recorded 64-bit value
+ NONE, // never recorded value
FIRST_REAL_RELOC_MODE = CODE_TARGET,
LAST_REAL_RELOC_MODE = VENEER_POOL,
@@ -456,9 +458,7 @@ class RelocInfo {
static inline bool IsInternalReferenceEncoded(Mode mode) {
return mode == INTERNAL_REFERENCE_ENCODED;
}
- static inline bool IsNone(Mode mode) {
- return mode == NONE32 || mode == NONE64;
- }
+ static inline bool IsNone(Mode mode) { return mode == NONE; }
static inline bool IsWasmContextReference(Mode mode) {
return mode == WASM_CONTEXT_REFERENCE;
}
@@ -476,7 +476,7 @@ class RelocInfo {
mode == WASM_CALL || mode == JS_TO_WASM_CALL;
}
- static inline int ModeMask(Mode mode) { return 1 << mode; }
+ static constexpr int ModeMask(Mode mode) { return 1 << mode; }
// Accessors
byte* pc() const { return pc_; }
@@ -485,6 +485,9 @@ class RelocInfo {
intptr_t data() const { return data_; }
Code* host() const { return host_; }
Address constant_pool() const { return constant_pool_; }
+ void set_constant_pool(Address constant_pool) {
+ constant_pool_ = constant_pool;
+ }
// Apply a relocation by delta bytes. When the code object is moved, PC
// relative addresses have to be updated as well as absolute addresses
@@ -508,25 +511,22 @@ class RelocInfo {
Address wasm_call_address() const;
void set_wasm_context_reference(
- Isolate* isolate, Address address,
+ Address address,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void update_wasm_function_table_size_reference(
- Isolate* isolate, uint32_t old_base, uint32_t new_base,
+ uint32_t old_base, uint32_t new_base,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void set_target_address(
- Isolate* isolate, Address target,
+ Address target,
WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- void set_global_handle(
- Isolate* isolate, Address address,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ void set_global_handle(Address address, ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED);
void set_wasm_call_address(
- Isolate*, Address,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void set_js_to_wasm_address(
- Isolate*, Address,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// this relocation applies to;
// can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
@@ -539,7 +539,7 @@ class RelocInfo {
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(Address target_runtime_entry(Assembler* origin));
INLINE(void set_target_runtime_entry(
- Isolate* isolate, Address target,
+ Address target,
WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(Cell* target_cell());
@@ -585,15 +585,15 @@ class RelocInfo {
// Wipe out a relocation to a fixed value, used for making snapshots
// reproducible.
- INLINE(void WipeOut(Isolate* isolate));
+ INLINE(void WipeOut());
template <typename ObjectVisitor>
- inline void Visit(Isolate* isolate, ObjectVisitor* v);
+ inline void Visit(ObjectVisitor* v);
#ifdef DEBUG
// Check whether the given code contains relocation information that
// either is position-relative or movable by the garbage collector.
- static bool RequiresRelocation(Isolate* isolate, const CodeDesc& desc);
+ static bool RequiresRelocation(const CodeDesc& desc);
#endif
#ifdef ENABLE_DISASSEMBLER
@@ -609,10 +609,8 @@ class RelocInfo {
static const int kApplyMask; // Modes affected by apply. Depends on arch.
private:
- void set_embedded_address(Isolate* isolate, Address address,
- ICacheFlushMode flush_mode);
- void set_embedded_size(Isolate* isolate, uint32_t size,
- ICacheFlushMode flush_mode);
+ void set_embedded_address(Address address, ICacheFlushMode flush_mode);
+ void set_embedded_size(uint32_t size, ICacheFlushMode flush_mode);
uint32_t embedded_size() const;
Address embedded_address() const;
@@ -623,9 +621,10 @@ class RelocInfo {
// comment).
byte* pc_;
Mode rmode_;
- intptr_t data_;
+ intptr_t data_ = 0;
Code* host_;
Address constant_pool_ = nullptr;
+ Flags flags_;
friend class RelocIterator;
};
@@ -635,7 +634,6 @@ class RelocInfo {
class RelocInfoWriter BASE_EMBEDDED {
public:
RelocInfoWriter() : pos_(nullptr), last_pc_(nullptr) {}
- RelocInfoWriter(byte* pos, byte* pc) : pos_(pos), last_pc_(pc) {}
byte* pos() const { return pos_; }
byte* last_pc() const { return last_pc_; }
@@ -651,10 +649,7 @@ class RelocInfoWriter BASE_EMBEDDED {
// Max size (bytes) of a written RelocInfo. Longest encoding is
// ExtraTag, VariableLengthPCJump, ExtraTag, pc_delta, data_delta.
- // On ia32 and arm this is 1 + 4 + 1 + 1 + 4 = 11.
- // On x64 this is 1 + 4 + 1 + 1 + 8 == 15;
- // Here we use the maximum of the two.
- static const int kMaxSize = 15;
+ static constexpr int kMaxSize = 1 + 4 + 1 + 1 + kPointerSize;
private:
inline uint32_t WriteLongPCJump(uint32_t pc_delta);
@@ -669,7 +664,6 @@ class RelocInfoWriter BASE_EMBEDDED {
byte* pos_;
byte* last_pc_;
- RelocInfo::Mode last_mode_;
DISALLOW_COPY_AND_ASSIGN(RelocInfoWriter);
};
@@ -733,19 +727,14 @@ class RelocIterator: public Malloced {
const byte* pos_;
const byte* end_;
RelocInfo rinfo_;
- bool done_;
- int mode_mask_;
+ bool done_ = false;
+ const int mode_mask_;
+
DISALLOW_COPY_AND_ASSIGN(RelocIterator);
};
-
//------------------------------------------------------------------------------
-// External function
-
-//----------------------------------------------------------------------------
-class SCTableReference;
-class Debug_Address;
-
+// External references
// An ExternalReference represents a C++ address used in the generated
// code. All references to C++ functions and variables must be encapsulated in
@@ -800,9 +789,7 @@ class ExternalReference BASE_EMBEDDED {
static void SetUp();
- // These functions must use the isolate in a thread-safe way.
- typedef void* ExternalReferenceRedirector(Isolate* isolate, void* original,
- Type type);
+ typedef void* ExternalReferenceRedirector(void* original, Type type);
ExternalReference() : address_(nullptr) {}
@@ -999,6 +986,7 @@ class ExternalReference BASE_EMBEDDED {
Isolate* isolate);
static ExternalReference copy_typed_array_elements_to_typed_array(
Isolate* isolate);
+ static ExternalReference copy_typed_array_elements_slice(Isolate* isolate);
static ExternalReference page_flags(Page* page);
@@ -1073,9 +1061,8 @@ class ExternalReference BASE_EMBEDDED {
reinterpret_cast<ExternalReferenceRedirector*>(
isolate->external_reference_redirector());
void* address = reinterpret_cast<void*>(address_arg);
- void* answer = (redirector == nullptr)
- ? address
- : (*redirector)(isolate, address, type);
+ void* answer =
+ (redirector == nullptr) ? address : (*redirector)(address, type);
return answer;
}
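
On the redirector signature above: ExternalReferenceRedirector no longer receives an Isolate*, so any redirection callback (for example the simulator's) now takes only the original address and the reference type. A hedged sketch of a callback matching the new typedef (MyRedirector and MyLookupTrampoline are invented names, not part of this patch):

#include "src/assembler.h"

namespace v8 {
namespace internal {

// Hypothetical stand-in for a simulator-style trampoline lookup.
static void* MyLookupTrampoline(void* addr) { return addr; }

// Matches: typedef void* ExternalReferenceRedirector(void* original, Type type);
void* MyRedirector(void* original, ExternalReference::Type type) {
  if (type == ExternalReference::BUILTIN_CALL) {
    return MyLookupTrampoline(original);
  }
  return original;  // leave other reference types untouched
}

}  // namespace internal
}  // namespace v8
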
diff --git a/deps/v8/src/ast/OWNERS b/deps/v8/src/ast/OWNERS
index ece6b5048a..32362199ae 100644
--- a/deps/v8/src/ast/OWNERS
+++ b/deps/v8/src/ast/OWNERS
@@ -6,7 +6,6 @@ littledan@chromium.org
marja@chromium.org
mstarzinger@chromium.org
neis@chromium.org
-rossberg@chromium.org
verwaest@chromium.org
# COMPONENT: Blink>JavaScript>Language
diff --git a/deps/v8/src/ast/ast-function-literal-id-reindexer.h b/deps/v8/src/ast/ast-function-literal-id-reindexer.h
index 837595f41b..400196da68 100644
--- a/deps/v8/src/ast/ast-function-literal-id-reindexer.h
+++ b/deps/v8/src/ast/ast-function-literal-id-reindexer.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER
-#define V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER
+#ifndef V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER_H_
+#define V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER_H_
#include "src/ast/ast-traversal-visitor.h"
#include "src/base/macros.h"
@@ -33,4 +33,4 @@ class AstFunctionLiteralIdReindexer final
} // namespace internal
} // namespace v8
-#endif // V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER
+#endif // V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER_H_
diff --git a/deps/v8/src/ast/ast-numbering.cc b/deps/v8/src/ast/ast-numbering.cc
deleted file mode 100644
index ade1a85349..0000000000
--- a/deps/v8/src/ast/ast-numbering.cc
+++ /dev/null
@@ -1,410 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/ast/ast-numbering.h"
-
-#include "src/ast/ast.h"
-#include "src/ast/scopes.h"
-#include "src/compiler.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
- public:
- AstNumberingVisitor(uintptr_t stack_limit, Zone* zone,
- Compiler::EagerInnerFunctionLiterals* eager_literals)
- : zone_(zone), eager_literals_(eager_literals), suspend_count_(0) {
- InitializeAstVisitor(stack_limit);
- }
-
- bool Renumber(FunctionLiteral* node);
-
- private:
-// AST node visitor interface.
-#define DEFINE_VISIT(type) void Visit##type(type* node);
- AST_NODE_LIST(DEFINE_VISIT)
-#undef DEFINE_VISIT
-
- void VisitSuspend(Suspend* node);
-
- void VisitStatementsAndDeclarations(Block* node);
- void VisitStatements(ZoneList<Statement*>* statements);
- void VisitDeclarations(Declaration::List* declarations);
- void VisitArguments(ZoneList<Expression*>* arguments);
- void VisitLiteralProperty(LiteralProperty* property);
-
- Zone* zone() const { return zone_; }
-
- Zone* zone_;
- Compiler::EagerInnerFunctionLiterals* eager_literals_;
- int suspend_count_;
- FunctionKind function_kind_;
-
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
- DISALLOW_COPY_AND_ASSIGN(AstNumberingVisitor);
-};
-
-void AstNumberingVisitor::VisitVariableDeclaration(VariableDeclaration* node) {
- VisitVariableProxy(node->proxy());
-}
-
-void AstNumberingVisitor::VisitEmptyStatement(EmptyStatement* node) {
-}
-
-void AstNumberingVisitor::VisitSloppyBlockFunctionStatement(
- SloppyBlockFunctionStatement* node) {
- Visit(node->statement());
-}
-
-void AstNumberingVisitor::VisitContinueStatement(ContinueStatement* node) {
-}
-
-void AstNumberingVisitor::VisitBreakStatement(BreakStatement* node) {
-}
-
-void AstNumberingVisitor::VisitDebuggerStatement(DebuggerStatement* node) {
-}
-
-void AstNumberingVisitor::VisitNativeFunctionLiteral(
- NativeFunctionLiteral* node) {
-}
-
-void AstNumberingVisitor::VisitDoExpression(DoExpression* node) {
- Visit(node->block());
- Visit(node->result());
-}
-
-void AstNumberingVisitor::VisitLiteral(Literal* node) {
-}
-
-void AstNumberingVisitor::VisitRegExpLiteral(RegExpLiteral* node) {
-}
-
-void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node) {
-}
-
-void AstNumberingVisitor::VisitThisFunction(ThisFunction* node) {
-}
-
-void AstNumberingVisitor::VisitSuperPropertyReference(
- SuperPropertyReference* node) {
- Visit(node->this_var());
- Visit(node->home_object());
-}
-
-void AstNumberingVisitor::VisitSuperCallReference(SuperCallReference* node) {
- Visit(node->this_var());
- Visit(node->new_target_var());
- Visit(node->this_function_var());
-}
-
-void AstNumberingVisitor::VisitExpressionStatement(ExpressionStatement* node) {
- Visit(node->expression());
-}
-
-void AstNumberingVisitor::VisitReturnStatement(ReturnStatement* node) {
- Visit(node->expression());
-}
-
-void AstNumberingVisitor::VisitSuspend(Suspend* node) {
- node->set_suspend_id(suspend_count_);
- suspend_count_++;
- Visit(node->expression());
-}
-
-void AstNumberingVisitor::VisitYield(Yield* node) { VisitSuspend(node); }
-
-void AstNumberingVisitor::VisitYieldStar(YieldStar* node) {
- node->set_suspend_id(suspend_count_++);
- if (IsAsyncGeneratorFunction(function_kind_)) {
- node->set_await_iterator_close_suspend_id(suspend_count_++);
- node->set_await_delegated_iterator_output_suspend_id(suspend_count_++);
- }
- Visit(node->expression());
-}
-
-void AstNumberingVisitor::VisitAwait(Await* node) { VisitSuspend(node); }
-
-void AstNumberingVisitor::VisitThrow(Throw* node) {
- Visit(node->exception());
-}
-
-void AstNumberingVisitor::VisitUnaryOperation(UnaryOperation* node) {
- Visit(node->expression());
-}
-
-void AstNumberingVisitor::VisitCountOperation(CountOperation* node) {
- Visit(node->expression());
-}
-
-void AstNumberingVisitor::VisitBlock(Block* node) {
- VisitStatementsAndDeclarations(node);
-}
-
-void AstNumberingVisitor::VisitStatementsAndDeclarations(Block* node) {
- Scope* scope = node->scope();
- DCHECK(scope == nullptr || !scope->HasBeenRemoved());
- if (scope) VisitDeclarations(scope->declarations());
- VisitStatements(node->statements());
-}
-
-void AstNumberingVisitor::VisitFunctionDeclaration(FunctionDeclaration* node) {
- VisitVariableProxy(node->proxy());
- VisitFunctionLiteral(node->fun());
-}
-
-void AstNumberingVisitor::VisitCallRuntime(CallRuntime* node) {
- VisitArguments(node->arguments());
-}
-
-void AstNumberingVisitor::VisitWithStatement(WithStatement* node) {
- Visit(node->expression());
- Visit(node->statement());
-}
-
-void AstNumberingVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
- node->set_first_suspend_id(suspend_count_);
- Visit(node->body());
- Visit(node->cond());
- node->set_suspend_count(suspend_count_ - node->first_suspend_id());
-}
-
-void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
- node->set_first_suspend_id(suspend_count_);
- Visit(node->cond());
- Visit(node->body());
- node->set_suspend_count(suspend_count_ - node->first_suspend_id());
-}
-
-void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
- DCHECK(node->scope() == nullptr || !node->scope()->HasBeenRemoved());
- Visit(node->try_block());
- Visit(node->catch_block());
-}
-
-void AstNumberingVisitor::VisitTryFinallyStatement(TryFinallyStatement* node) {
- Visit(node->try_block());
- Visit(node->finally_block());
-}
-
-void AstNumberingVisitor::VisitProperty(Property* node) {
- Visit(node->key());
- Visit(node->obj());
-}
-
-void AstNumberingVisitor::VisitResolvedProperty(ResolvedProperty* node) {
- Visit(node->object());
- Visit(node->property());
-}
-
-void AstNumberingVisitor::VisitAssignment(Assignment* node) {
- Visit(node->target());
- Visit(node->value());
-}
-
-void AstNumberingVisitor::VisitCompoundAssignment(CompoundAssignment* node) {
- VisitBinaryOperation(node->binary_operation());
- VisitAssignment(node);
-}
-
-void AstNumberingVisitor::VisitBinaryOperation(BinaryOperation* node) {
- Visit(node->left());
- Visit(node->right());
-}
-
-void AstNumberingVisitor::VisitNaryOperation(NaryOperation* node) {
- Visit(node->first());
- for (size_t i = 0; i < node->subsequent_length(); ++i) {
- Visit(node->subsequent(i));
- }
-}
-
-void AstNumberingVisitor::VisitCompareOperation(CompareOperation* node) {
- Visit(node->left());
- Visit(node->right());
-}
-
-void AstNumberingVisitor::VisitSpread(Spread* node) {
- Visit(node->expression());
-}
-
-void AstNumberingVisitor::VisitEmptyParentheses(EmptyParentheses* node) {
- UNREACHABLE();
-}
-
-void AstNumberingVisitor::VisitGetIterator(GetIterator* node) {
- Visit(node->iterable());
-}
-
-void AstNumberingVisitor::VisitGetTemplateObject(GetTemplateObject* node) {}
-
-void AstNumberingVisitor::VisitImportCallExpression(
- ImportCallExpression* node) {
- Visit(node->argument());
-}
-
-void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
- Visit(node->enumerable()); // Not part of loop.
- node->set_first_suspend_id(suspend_count_);
- Visit(node->each());
- Visit(node->body());
- node->set_suspend_count(suspend_count_ - node->first_suspend_id());
-}
-
-void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
- Visit(node->assign_iterator()); // Not part of loop.
- Visit(node->assign_next());
- node->set_first_suspend_id(suspend_count_);
- Visit(node->next_result());
- Visit(node->result_done());
- Visit(node->assign_each());
- Visit(node->body());
- node->set_suspend_count(suspend_count_ - node->first_suspend_id());
-}
-
-void AstNumberingVisitor::VisitConditional(Conditional* node) {
- Visit(node->condition());
- Visit(node->then_expression());
- Visit(node->else_expression());
-}
-
-void AstNumberingVisitor::VisitIfStatement(IfStatement* node) {
- Visit(node->condition());
- Visit(node->then_statement());
- if (node->HasElseStatement()) {
- Visit(node->else_statement());
- }
-}
-
-void AstNumberingVisitor::VisitSwitchStatement(SwitchStatement* node) {
- Visit(node->tag());
- for (CaseClause* clause : *node->cases()) {
- if (!clause->is_default()) Visit(clause->label());
- VisitStatements(clause->statements());
- }
-}
-
-void AstNumberingVisitor::VisitForStatement(ForStatement* node) {
- if (node->init() != nullptr) Visit(node->init()); // Not part of loop.
- node->set_first_suspend_id(suspend_count_);
- if (node->cond() != nullptr) Visit(node->cond());
- if (node->next() != nullptr) Visit(node->next());
- Visit(node->body());
- node->set_suspend_count(suspend_count_ - node->first_suspend_id());
-}
-
-void AstNumberingVisitor::VisitClassLiteral(ClassLiteral* node) {
- if (node->extends()) Visit(node->extends());
- if (node->constructor()) Visit(node->constructor());
- if (node->static_fields_initializer() != nullptr) {
- Visit(node->static_fields_initializer());
- }
- if (node->instance_fields_initializer_function() != nullptr) {
- Visit(node->instance_fields_initializer_function());
- }
- for (int i = 0; i < node->properties()->length(); i++) {
- VisitLiteralProperty(node->properties()->at(i));
- }
-}
-
-void AstNumberingVisitor::VisitInitializeClassFieldsStatement(
- InitializeClassFieldsStatement* node) {
- for (int i = 0; i < node->fields()->length(); i++) {
- VisitLiteralProperty(node->fields()->at(i));
- }
-}
-
-void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
- for (int i = 0; i < node->properties()->length(); i++) {
- VisitLiteralProperty(node->properties()->at(i));
- }
-}
-
-void AstNumberingVisitor::VisitLiteralProperty(LiteralProperty* node) {
- Visit(node->key());
- Visit(node->value());
-}
-
-void AstNumberingVisitor::VisitArrayLiteral(ArrayLiteral* node) {
- for (int i = 0; i < node->values()->length(); i++) {
- Visit(node->values()->at(i));
- }
-}
-
-void AstNumberingVisitor::VisitCall(Call* node) {
- Visit(node->expression());
- VisitArguments(node->arguments());
-}
-
-void AstNumberingVisitor::VisitCallNew(CallNew* node) {
- Visit(node->expression());
- VisitArguments(node->arguments());
-}
-
-void AstNumberingVisitor::VisitStatements(ZoneList<Statement*>* statements) {
- if (statements == nullptr) return;
- for (int i = 0; i < statements->length(); i++) {
- Visit(statements->at(i));
- if (statements->at(i)->IsJump()) break;
- }
-}
-
-void AstNumberingVisitor::VisitDeclarations(Declaration::List* decls) {
- for (Declaration* decl : *decls) Visit(decl);
-}
-
-void AstNumberingVisitor::VisitArguments(ZoneList<Expression*>* arguments) {
- for (int i = 0; i < arguments->length(); i++) {
- Visit(arguments->at(i));
- }
-}
-
-void AstNumberingVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
- if (node->ShouldEagerCompile()) {
- if (eager_literals_) {
- eager_literals_->Add(new (zone())
- ThreadedListZoneEntry<FunctionLiteral*>(node));
- }
-
- // If the function literal is being eagerly compiled, recurse into the
- // declarations and body of the function literal.
- if (!AstNumbering::Renumber(stack_limit_, zone_, node, eager_literals_)) {
- SetStackOverflow();
- return;
- }
- }
-}
-
-void AstNumberingVisitor::VisitRewritableExpression(
- RewritableExpression* node) {
- Visit(node->expression());
-}
-
-bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
- DeclarationScope* scope = node->scope();
- DCHECK(!scope->HasBeenRemoved());
- function_kind_ = node->kind();
-
- VisitDeclarations(scope->declarations());
- VisitStatements(node->body());
-
- node->set_suspend_count(suspend_count_);
-
- return !HasStackOverflow();
-}
-
-bool AstNumbering::Renumber(
- uintptr_t stack_limit, Zone* zone, FunctionLiteral* function,
- Compiler::EagerInnerFunctionLiterals* eager_literals) {
- DisallowHeapAllocation no_allocation;
- DisallowHandleAllocation no_handles;
- DisallowHandleDereference no_deref;
-
- AstNumberingVisitor visitor(stack_limit, zone, eager_literals);
- return visitor.Renumber(function);
-}
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/ast/ast-numbering.h b/deps/v8/src/ast/ast-numbering.h
deleted file mode 100644
index 11122803b8..0000000000
--- a/deps/v8/src/ast/ast-numbering.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_AST_AST_NUMBERING_H_
-#define V8_AST_AST_NUMBERING_H_
-
-#include <stdint.h>
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class FunctionLiteral;
-class Isolate;
-class Zone;
-template <typename T>
-class ThreadedList;
-template <typename T>
-class ThreadedListZoneEntry;
-template <typename T>
-class ZoneVector;
-
-namespace AstNumbering {
-// Assign bailout IDs, and generator suspend IDs to an AST node tree; perform
-// catch prediction for TryStatements. If |eager_literals| is non-null, adds any
-// eager inner literal functions into it.
-bool Renumber(
- uintptr_t stack_limit, Zone* zone, FunctionLiteral* function,
- ThreadedList<ThreadedListZoneEntry<FunctionLiteral*>>* eager_literals);
-}
-
-// Some details on suspend IDs
-// -------------------------
-//
-// In order to assist Ignition in generating bytecode for a generator function,
-// we assign a unique number (the suspend ID) to each Suspend node in its AST.
-// We also annotate loops with the number of suspends they contain
-// (loop.suspend_count) and the smallest ID of those (loop.first_suspend_id),
-// and we annotate the function itself with the number of suspends it contains
-// (function.suspend_count).
-//
-// The way in which we choose the IDs is simply by enumerating the Suspend
-// nodes.
-// Ignition relies on the following properties:
-// - For each loop l and each suspend y of l:
-// l.first_suspend_id <=
-// s.suspend_id < l.first_suspend_id + l.suspend_count
-// - For the generator function f itself and each suspend s of f:
-// 0 <= s.suspend_id < f.suspend_count
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_AST_AST_NUMBERING_H_
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index da14d87475..2856abb40c 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -311,7 +311,7 @@ ClassLiteralProperty::ClassLiteralProperty(Expression* key, Expression* value,
: LiteralProperty(key, value, is_computed_name),
kind_(kind),
is_static_(is_static),
- computed_name_var_(nullptr) {}
+ private_or_computed_name_var_(nullptr) {}
bool ObjectLiteral::Property::IsCompileTimeValue() const {
return kind_ == CONSTANT ||
@@ -683,8 +683,8 @@ Handle<TemplateObjectDescription> GetTemplateObject::GetOrBuildDescription(
}
}
}
- return isolate->factory()->NewTemplateObjectDescription(
- this->hash(), raw_strings, cooked_strings);
+ return isolate->factory()->NewTemplateObjectDescription(raw_strings,
+ cooked_strings);
}
static bool IsCommutativeOperationWithSmiLiteral(Token::Value op) {
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index f608621d3b..661c5b7293 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -437,21 +437,12 @@ class IterationStatement : public BreakableStatement {
ZoneList<const AstRawString*>* labels() const { return labels_; }
- int suspend_count() const { return suspend_count_; }
- int first_suspend_id() const { return first_suspend_id_; }
- void set_suspend_count(int suspend_count) { suspend_count_ = suspend_count; }
- void set_first_suspend_id(int first_suspend_id) {
- first_suspend_id_ = first_suspend_id;
- }
-
protected:
IterationStatement(ZoneList<const AstRawString*>* labels, int pos,
NodeType type)
: BreakableStatement(TARGET_FOR_ANONYMOUS, pos, type),
labels_(labels),
- body_(nullptr),
- suspend_count_(0),
- first_suspend_id_(0) {}
+ body_(nullptr) {}
void Initialize(Statement* body) { body_ = body; }
static const uint8_t kNextBitFieldIndex =
@@ -460,8 +451,6 @@ class IterationStatement : public BreakableStatement {
private:
ZoneList<const AstRawString*>* labels_;
Statement* body_;
- int suspend_count_;
- int first_suspend_id_;
};
@@ -1486,6 +1475,7 @@ class ArrayLiteral final : public AggregateLiteral {
ZoneList<Expression*>* values_;
};
+enum class HoleCheckMode { kRequired, kElided };
class VariableProxy final : public Expression {
public:
@@ -1540,6 +1530,11 @@ class VariableProxy final : public Expression {
HoleCheckModeField::update(bit_field_, HoleCheckMode::kRequired);
}
+ bool is_private_field() const { return IsPrivateField::decode(bit_field_); }
+ void set_is_private_field() {
+ bit_field_ = IsPrivateField::update(bit_field_, true);
+ }
+
// Bind this proxy to the variable var.
void BindTo(Variable* var);
@@ -1559,7 +1554,8 @@ class VariableProxy final : public Expression {
bit_field_ |= IsThisField::encode(variable_kind == THIS_VARIABLE) |
IsAssignedField::encode(false) |
IsResolvedField::encode(false) |
- HoleCheckModeField::encode(HoleCheckMode::kElided);
+ HoleCheckModeField::encode(HoleCheckMode::kElided) |
+ IsPrivateField::encode(false);
}
explicit VariableProxy(const VariableProxy* copy_from);
@@ -1571,6 +1567,7 @@ class VariableProxy final : public Expression {
class IsNewTargetField : public BitField<bool, IsResolvedField::kNext, 1> {};
class HoleCheckModeField
: public BitField<HoleCheckMode, IsNewTargetField::kNext, 1> {};
+ class IsPrivateField : public BitField<bool, HoleCheckModeField::kNext, 1> {};
union {
const AstRawString* raw_name_; // if !is_resolved_
@@ -1590,7 +1587,6 @@ enum LhsKind {
KEYED_SUPER_PROPERTY
};
-
class Property final : public Expression {
public:
bool IsValidReferenceExpression() const { return true; }
@@ -2096,11 +2092,6 @@ class Suspend : public Expression {
return OnAbruptResumeField::decode(bit_field_);
}
- int suspend_id() const { return suspend_id_; }
- void set_suspend_id(int id) { suspend_id_ = id; }
-
- inline bool IsInitialYield() const { return suspend_id_ == 0 && IsYield(); }
-
private:
friend class AstNodeFactory;
friend class Yield;
@@ -2109,11 +2100,10 @@ class Suspend : public Expression {
Suspend(NodeType node_type, Expression* expression, int pos,
OnAbruptResume on_abrupt_resume)
- : Expression(pos, node_type), suspend_id_(-1), expression_(expression) {
+ : Expression(pos, node_type), expression_(expression) {
bit_field_ |= OnAbruptResumeField::encode(on_abrupt_resume);
}
- int suspend_id_;
Expression* expression_;
class OnAbruptResumeField
@@ -2128,47 +2118,11 @@ class Yield final : public Suspend {
};
class YieldStar final : public Suspend {
- public:
- // In addition to the normal suspend for yield*, a yield* in an async
- // generator has 2 additional suspends:
- // - One for awaiting the iterator result of closing the generator when
- // resumed with a "throw" completion, and a throw method is not present
- // on the delegated iterator (await_iterator_close_suspend_id)
- // - One for awaiting the iterator result yielded by the delegated iterator
- // (await_delegated_iterator_output_suspend_id)
- int await_iterator_close_suspend_id() const {
- return await_iterator_close_suspend_id_;
- }
- void set_await_iterator_close_suspend_id(int id) {
- await_iterator_close_suspend_id_ = id;
- }
-
- int await_delegated_iterator_output_suspend_id() const {
- return await_delegated_iterator_output_suspend_id_;
- }
- void set_await_delegated_iterator_output_suspend_id(int id) {
- await_delegated_iterator_output_suspend_id_ = id;
- }
-
- inline int suspend_count() const {
- if (await_iterator_close_suspend_id_ != -1) {
- DCHECK_NE(-1, await_delegated_iterator_output_suspend_id_);
- return 3;
- }
- return 1;
- }
-
private:
friend class AstNodeFactory;
-
YieldStar(Expression* expression, int pos)
: Suspend(kYieldStar, expression, pos,
- Suspend::OnAbruptResume::kNoControl),
- await_iterator_close_suspend_id_(-1),
- await_delegated_iterator_output_suspend_id_(-1) {}
-
- int await_iterator_close_suspend_id_;
- int await_delegated_iterator_output_suspend_id_;
+ Suspend::OnAbruptResume::kNoControl) {}
};
class Await final : public Suspend {
@@ -2407,14 +2361,29 @@ class FunctionLiteral final : public Expression {
// about a class literal's properties from the parser to the code generator.
class ClassLiteralProperty final : public LiteralProperty {
public:
- enum Kind : uint8_t { METHOD, GETTER, SETTER, FIELD };
+ enum Kind : uint8_t { METHOD, GETTER, SETTER, PUBLIC_FIELD, PRIVATE_FIELD };
Kind kind() const { return kind_; }
bool is_static() const { return is_static_; }
- void set_computed_name_var(Variable* var) { computed_name_var_ = var; }
- Variable* computed_name_var() const { return computed_name_var_; }
+ void set_computed_name_var(Variable* var) {
+ DCHECK_EQ(PUBLIC_FIELD, kind());
+ private_or_computed_name_var_ = var;
+ }
+ Variable* computed_name_var() const {
+ DCHECK_EQ(PUBLIC_FIELD, kind());
+ return private_or_computed_name_var_;
+ }
+
+ void set_private_field_name_var(Variable* var) {
+ DCHECK_EQ(PRIVATE_FIELD, kind());
+ private_or_computed_name_var_ = var;
+ }
+ Variable* private_field_name_var() const {
+ DCHECK_EQ(PRIVATE_FIELD, kind());
+ return private_or_computed_name_var_;
+ }
private:
friend class AstNodeFactory;
@@ -2424,7 +2393,7 @@ class ClassLiteralProperty final : public LiteralProperty {
Kind kind_;
bool is_static_;
- Variable* computed_name_var_;
+ Variable* private_or_computed_name_var_;
};
class InitializeClassFieldsStatement final : public Statement {
@@ -2665,7 +2634,6 @@ class GetTemplateObject final : public Expression {
const ZoneList<const AstRawString*>* raw_strings() const {
return raw_strings_;
}
- int hash() const { return hash_; }
Handle<TemplateObjectDescription> GetOrBuildDescription(Isolate* isolate);
@@ -2673,16 +2641,13 @@ class GetTemplateObject final : public Expression {
friend class AstNodeFactory;
GetTemplateObject(const ZoneList<const AstRawString*>* cooked_strings,
- const ZoneList<const AstRawString*>* raw_strings, int hash,
- int pos)
+ const ZoneList<const AstRawString*>* raw_strings, int pos)
: Expression(pos, kGetTemplateObject),
cooked_strings_(cooked_strings),
- raw_strings_(raw_strings),
- hash_(hash) {}
+ raw_strings_(raw_strings) {}
const ZoneList<const AstRawString*>* cooked_strings_;
const ZoneList<const AstRawString*>* raw_strings_;
- int hash_;
};
// ----------------------------------------------------------------------------
@@ -3257,9 +3222,8 @@ class AstNodeFactory final BASE_EMBEDDED {
GetTemplateObject* NewGetTemplateObject(
const ZoneList<const AstRawString*>* cooked_strings,
- const ZoneList<const AstRawString*>* raw_strings, int hash, int pos) {
- return new (zone_)
- GetTemplateObject(cooked_strings, raw_strings, hash, pos);
+ const ZoneList<const AstRawString*>* raw_strings, int pos) {
+ return new (zone_) GetTemplateObject(cooked_strings, raw_strings, pos);
}
ImportCallExpression* NewImportCallExpression(Expression* args, int pos) {
diff --git a/deps/v8/src/ast/compile-time-value.h b/deps/v8/src/ast/compile-time-value.h
index e8ded43122..874bc1b32f 100644
--- a/deps/v8/src/ast/compile-time-value.h
+++ b/deps/v8/src/ast/compile-time-value.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_AST_COMPILE_TIME_VALUE
-#define V8_AST_COMPILE_TIME_VALUE
+#ifndef V8_AST_COMPILE_TIME_VALUE_H_
+#define V8_AST_COMPILE_TIME_VALUE_H_
#include "src/allocation.h"
#include "src/globals.h"
@@ -43,4 +43,4 @@ class CompileTimeValue : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_AST_COMPILE_TIME_VALUE
+#endif // V8_AST_COMPILE_TIME_VALUE_H_
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index 374c848289..d898a70479 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -909,8 +909,6 @@ void AstPrinter::VisitSwitchStatement(SwitchStatement* node) {
void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
IndentedScope indent(this, "DO", node->position());
- PrintIndented("SUSPEND COUNT");
- Print(" %d\n", node->suspend_count());
PrintLabelsIndented(node->labels());
PrintIndentedVisit("BODY", node->body());
PrintIndentedVisit("COND", node->cond());
@@ -919,8 +917,6 @@ void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
void AstPrinter::VisitWhileStatement(WhileStatement* node) {
IndentedScope indent(this, "WHILE", node->position());
- PrintIndented("SUSPEND COUNT");
- Print(" %d\n", node->suspend_count());
PrintLabelsIndented(node->labels());
PrintIndentedVisit("COND", node->cond());
PrintIndentedVisit("BODY", node->body());
@@ -929,8 +925,6 @@ void AstPrinter::VisitWhileStatement(WhileStatement* node) {
void AstPrinter::VisitForStatement(ForStatement* node) {
IndentedScope indent(this, "FOR", node->position());
- PrintIndented("SUSPEND COUNT");
- Print(" %d\n", node->suspend_count());
PrintLabelsIndented(node->labels());
if (node->init()) PrintIndentedVisit("INIT", node->init());
if (node->cond()) PrintIndentedVisit("COND", node->cond());
@@ -941,8 +935,6 @@ void AstPrinter::VisitForStatement(ForStatement* node) {
void AstPrinter::VisitForInStatement(ForInStatement* node) {
IndentedScope indent(this, "FOR IN", node->position());
- PrintIndented("SUSPEND COUNT");
- Print(" %d\n", node->suspend_count());
PrintIndentedVisit("FOR", node->each());
PrintIndentedVisit("IN", node->enumerable());
PrintIndentedVisit("BODY", node->body());
@@ -951,8 +943,6 @@ void AstPrinter::VisitForInStatement(ForInStatement* node) {
void AstPrinter::VisitForOfStatement(ForOfStatement* node) {
IndentedScope indent(this, "FOR OF", node->position());
- PrintIndented("SUSPEND COUNT");
- Print(" %d\n", node->suspend_count());
PrintIndentedVisit("INIT", node->assign_iterator());
PrintIndentedVisit("NEXT", node->next_result());
PrintIndentedVisit("DONE", node->result_done());
@@ -1053,8 +1043,11 @@ void AstPrinter::PrintClassProperties(
case ClassLiteral::Property::SETTER:
prop_kind = "SETTER";
break;
- case ClassLiteral::Property::FIELD:
- prop_kind = "FIELD";
+ case ClassLiteral::Property::PUBLIC_FIELD:
+ prop_kind = "PUBLIC FIELD";
+ break;
+ case ClassLiteral::Property::PRIVATE_FIELD:
+ prop_kind = "PRIVATE FIELD";
break;
}
EmbeddedVector<char, 128> buf;
@@ -1208,21 +1201,21 @@ void AstPrinter::VisitCompoundAssignment(CompoundAssignment* node) {
void AstPrinter::VisitYield(Yield* node) {
EmbeddedVector<char, 128> buf;
- SNPrintF(buf, "YIELD id %d", node->suspend_id());
+ SNPrintF(buf, "YIELD");
IndentedScope indent(this, buf.start(), node->position());
Visit(node->expression());
}
void AstPrinter::VisitYieldStar(YieldStar* node) {
EmbeddedVector<char, 128> buf;
- SNPrintF(buf, "YIELD_STAR id %d", node->suspend_id());
+ SNPrintF(buf, "YIELD_STAR");
IndentedScope indent(this, buf.start(), node->position());
Visit(node->expression());
}
void AstPrinter::VisitAwait(Await* node) {
EmbeddedVector<char, 128> buf;
- SNPrintF(buf, "AWAIT id %d", node->suspend_id());
+ SNPrintF(buf, "AWAIT");
IndentedScope indent(this, buf.start(), node->position());
Visit(node->expression());
}
@@ -1232,7 +1225,6 @@ void AstPrinter::VisitThrow(Throw* node) {
Visit(node->exception());
}
-
void AstPrinter::VisitProperty(Property* node) {
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "PROPERTY");
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index 8f2f85080c..a87e756a0e 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -643,7 +643,7 @@ void DeclarationScope::AttachOuterScopeInfo(ParseInfo* info, Isolate* isolate) {
}
}
-void DeclarationScope::Analyze(ParseInfo* info) {
+bool DeclarationScope::Analyze(ParseInfo* info) {
RuntimeCallTimerScope runtimeTimer(
info->runtime_call_stats(),
info->on_background_thread()
@@ -681,7 +681,7 @@ void DeclarationScope::Analyze(ParseInfo* info) {
info->consumed_preparsed_scope_data()->RestoreScopeAllocationData(scope);
}
- scope->AllocateVariables(info);
+ if (!scope->AllocateVariables(info)) return false;
#ifdef DEBUG
if (info->is_native() ? FLAG_print_builtin_scopes : FLAG_print_scopes) {
@@ -691,6 +691,8 @@ void DeclarationScope::Analyze(ParseInfo* info) {
scope->CheckScopePositions();
scope->CheckZones();
#endif
+
+ return true;
}
void DeclarationScope::DeclareThis(AstValueFactory* ast_value_factory) {
@@ -1342,13 +1344,18 @@ Declaration* Scope::CheckLexDeclarationsConflictingWith(
return nullptr;
}
-void DeclarationScope::AllocateVariables(ParseInfo* info) {
+bool DeclarationScope::AllocateVariables(ParseInfo* info) {
// Module variables must be allocated before variable resolution
// to ensure that UpdateNeedsHoleCheck() can detect import variables.
if (is_module_scope()) AsModuleScope()->AllocateModuleVariables();
- ResolveVariablesRecursively(info);
+ if (!ResolveVariablesRecursively(info)) {
+ DCHECK(info->pending_error_handler()->has_pending_error());
+ return false;
+ }
AllocateVariablesRecursively();
+
+ return true;
}
bool Scope::AllowsLazyParsingWithoutUnresolvedVariables(
@@ -1811,7 +1818,8 @@ Variable* Scope::NonLocal(const AstRawString* name, VariableMode mode) {
return var;
}
-Variable* Scope::LookupRecursive(VariableProxy* proxy, Scope* outer_scope_end) {
+Variable* Scope::LookupRecursive(ParseInfo* info, VariableProxy* proxy,
+ Scope* outer_scope_end) {
DCHECK_NE(outer_scope_end, this);
// Short-cut: whenever we find a debug-evaluate scope, just look everything up
// dynamically. Debug-evaluate doesn't properly create scope info for the
@@ -1834,6 +1842,15 @@ Variable* Scope::LookupRecursive(VariableProxy* proxy, Scope* outer_scope_end) {
// We may just be trying to find all free variables. In that case, don't
// declare them in the outer scope.
if (!is_script_scope()) return nullptr;
+
+ if (proxy->is_private_field()) {
+ info->pending_error_handler()->ReportMessageAt(
+ proxy->position(), proxy->position() + 1,
+ MessageTemplate::kInvalidPrivateFieldAccess, proxy->raw_name(),
+ kSyntaxError);
+ return nullptr;
+ }
+
// No binding has been found. Declare a variable on the global object.
return AsDeclarationScope()->DeclareDynamicGlobal(proxy->raw_name(),
NORMAL_VARIABLE);
@@ -1841,7 +1858,7 @@ Variable* Scope::LookupRecursive(VariableProxy* proxy, Scope* outer_scope_end) {
DCHECK(!is_script_scope());
- var = outer_scope_->LookupRecursive(proxy, outer_scope_end);
+ var = outer_scope_->LookupRecursive(info, proxy, outer_scope_end);
// The variable could not be resolved statically.
if (var == nullptr) return var;
@@ -1899,11 +1916,16 @@ Variable* Scope::LookupRecursive(VariableProxy* proxy, Scope* outer_scope_end) {
return var;
}
-void Scope::ResolveVariable(ParseInfo* info, VariableProxy* proxy) {
+bool Scope::ResolveVariable(ParseInfo* info, VariableProxy* proxy) {
DCHECK(info->script_scope()->is_script_scope());
DCHECK(!proxy->is_resolved());
- Variable* var = LookupRecursive(proxy, nullptr);
+ Variable* var = LookupRecursive(info, proxy, nullptr);
+ if (var == nullptr) {
+ DCHECK(proxy->is_private_field());
+ return false;
+ }
ResolveTo(info, proxy, var);
+ return true;
}
namespace {
@@ -1983,8 +2005,8 @@ void Scope::ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var) {
// The following variable name may be minified. If so, disable
// minification in js2c.py for better output.
Handle<String> name = proxy->raw_name()->string();
- V8_Fatal(__FILE__, __LINE__, "Unbound variable: '%s' in native script.",
- name->ToCString().get());
+ FATAL("Unbound variable: '%s' in native script.",
+ name->ToCString().get());
}
VariableLocation location = var->location();
DCHECK(location == VariableLocation::LOCAL ||
@@ -1999,7 +2021,7 @@ void Scope::ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var) {
proxy->BindTo(var);
}
-void Scope::ResolveVariablesRecursively(ParseInfo* info) {
+bool Scope::ResolveVariablesRecursively(ParseInfo* info) {
DCHECK(info->script_scope()->is_script_scope());
// Lazy parsed declaration scopes are already partially analyzed. If there are
// unresolved references remaining, they just need to be resolved in outer
@@ -2008,7 +2030,11 @@ void Scope::ResolveVariablesRecursively(ParseInfo* info) {
DCHECK_EQ(variables_.occupancy(), 0);
for (VariableProxy* proxy = unresolved_; proxy != nullptr;
proxy = proxy->next_unresolved()) {
- Variable* var = outer_scope()->LookupRecursive(proxy, nullptr);
+ Variable* var = outer_scope()->LookupRecursive(info, proxy, nullptr);
+ if (var == nullptr) {
+ DCHECK(proxy->is_private_field());
+ return false;
+ }
if (!var->is_dynamic()) {
var->set_is_used();
var->ForceContextAllocation();
@@ -2019,15 +2045,16 @@ void Scope::ResolveVariablesRecursively(ParseInfo* info) {
// Resolve unresolved variables for this scope.
for (VariableProxy* proxy = unresolved_; proxy != nullptr;
proxy = proxy->next_unresolved()) {
- ResolveVariable(info, proxy);
+ if (!ResolveVariable(info, proxy)) return false;
}
// Resolve unresolved variables for inner scopes.
for (Scope* scope = inner_scope_; scope != nullptr;
scope = scope->sibling_) {
- scope->ResolveVariablesRecursively(info);
+ if (!scope->ResolveVariablesRecursively(info)) return false;
}
}
+ return true;
}
VariableProxy* Scope::FetchFreeVariables(DeclarationScope* max_outer_scope,
@@ -2050,7 +2077,7 @@ VariableProxy* Scope::FetchFreeVariables(DeclarationScope* max_outer_scope,
next = proxy->next_unresolved();
DCHECK(!proxy->is_resolved());
Variable* var =
- lookup->LookupRecursive(proxy, max_outer_scope->outer_scope());
+ lookup->LookupRecursive(info, proxy, max_outer_scope->outer_scope());
if (var == nullptr) {
proxy->set_next_unresolved(stack);
stack = proxy;
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index d2e8886319..2ffaaf6752 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -587,10 +587,11 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// scope, and stopping when reaching the outer_scope_end scope. If the code is
// executed because of a call to 'eval', the context parameter should be set
// to the calling context of 'eval'.
- Variable* LookupRecursive(VariableProxy* proxy, Scope* outer_scope_end);
+ Variable* LookupRecursive(ParseInfo* info, VariableProxy* proxy,
+ Scope* outer_scope_end);
void ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var);
- void ResolveVariable(ParseInfo* info, VariableProxy* proxy);
- void ResolveVariablesRecursively(ParseInfo* info);
+ MUST_USE_RESULT bool ResolveVariable(ParseInfo* info, VariableProxy* proxy);
+ MUST_USE_RESULT bool ResolveVariablesRecursively(ParseInfo* info);
// Finds free variables of this scope. This mutates the unresolved variables
// list along the way, so full resolution cannot be done afterwards.
@@ -849,7 +850,12 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// Compute top scope and allocate variables. For lazy compilation the top
// scope only contains the single lazily compiled function, so this
// doesn't re-allocate variables repeatedly.
- static void Analyze(ParseInfo* info);
+ //
+ // Returns false if private fields can not be resolved and
+ // ParseInfo's pending_error_handler will be populated with an
+ // error. Otherwise, returns true.
+ MUST_USE_RESULT
+ static bool Analyze(ParseInfo* info);
// To be called during parsing. Do just enough scope analysis that we can
// discard the Scope contents for lazily compiled functions. In particular,
@@ -920,7 +926,9 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// In the case of code compiled and run using 'eval', the context
// parameter is the context in which eval was called. In all other
// cases the context parameter is an empty handle.
- void AllocateVariables(ParseInfo* info);
+ //
+ // Returns false if private fields can not be resolved.
+ bool AllocateVariables(ParseInfo* info);
void SetDefaults();
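
On the new bool return of DeclarationScope::Analyze() above: callers are now expected to check the result, since a failure means the error has already been recorded on the ParseInfo's pending error handler. A minimal sketch of such a caller (AnalyzeOrReport is a hypothetical wrapper, not from this patch):

#include "src/ast/scopes.h"
#include "src/base/logging.h"
#include "src/parsing/parse-info.h"

namespace v8 {
namespace internal {

// Run scope analysis; on failure the pending error handler already holds a
// syntax error (e.g. an invalid private field access), so just signal failure.
bool AnalyzeOrReport(ParseInfo* info) {
  if (DeclarationScope::Analyze(info)) return true;
  DCHECK(info->pending_error_handler()->has_pending_error());
  return false;
}

}  // namespace internal
}  // namespace v8
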
diff --git a/deps/v8/src/bailout-reason.h b/deps/v8/src/bailout-reason.h
index c8e81c69d4..51e3708b6a 100644
--- a/deps/v8/src/bailout-reason.h
+++ b/deps/v8/src/bailout-reason.h
@@ -26,6 +26,7 @@ namespace internal {
"The function_data field should be a BytecodeArray on interpreter entry") \
V(kInputStringTooLong, "Input string too long") \
V(kInvalidBytecode, "Invalid bytecode") \
+ V(kInvalidBytecodeAdvance, "Cannot advance current bytecode, ") \
V(kInvalidElementsKindForInternalArrayOrInternalPackedArray, \
"Invalid ElementsKind for InternalArray or InternalPackedArray") \
V(kInvalidHandleScopeLevel, "Invalid HandleScope level") \
@@ -55,8 +56,6 @@ namespace internal {
"Should not directly enter OSR-compiled function") \
V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \
V(kStackFrameTypesMustMatch, "Stack frame types must match") \
- V(kTheCurrentStackPointerIsBelowCsp, \
- "The current stack pointer is below csp") \
V(kTheStackWasCorruptedByMacroAssemblerCall, \
"The stack was corrupted by MacroAssembler::Call()") \
V(kUnalignedCellInWriteBarrier, "Unaligned cell in write barrier") \
@@ -89,6 +88,7 @@ namespace internal {
"Wrong address or value passed to RecordWrite") \
V(kWrongArgumentCountForInvokeIntrinsic, \
"Wrong number of arguments for intrinsic") \
+ V(kWrongFunctionCodeStart, "Wrong value in code start register passed") \
V(kWrongFunctionContext, "Wrong context passed to function")
#define BAILOUT_MESSAGES_LIST(V) \
@@ -100,6 +100,7 @@ namespace internal {
"Cyclic object state detected by escape analysis") \
V(kFunctionBeingDebugged, "Function is being debugged") \
V(kGraphBuildingFailed, "Optimized graph construction failed") \
+ V(kFunctionTooBig, "Function is too big to be optimized") \
V(kLiveEdit, "LiveEdit") \
V(kNativeFunctionLiteral, "Native function literal") \
V(kNotEnoughVirtualRegistersRegalloc, \
diff --git a/deps/v8/src/base.isolate b/deps/v8/src/base.isolate
index d482d75cfb..0fcf818069 100644
--- a/deps/v8/src/base.isolate
+++ b/deps/v8/src/base.isolate
@@ -6,7 +6,7 @@
'../third_party/icu/icu.isolate',
# MSVS runtime libraries.
- '../gypfiles/win/msvs_dependencies.isolate',
+ '../gni/msvs_dependencies.isolate',
],
'conditions': [
['v8_use_snapshot=="true" and v8_use_external_startup_data==1', {
diff --git a/deps/v8/src/base/atomic-utils.h b/deps/v8/src/base/atomic-utils.h
index 854d846cc0..5ba1ad4246 100644
--- a/deps/v8/src/base/atomic-utils.h
+++ b/deps/v8/src/base/atomic-utils.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ATOMIC_UTILS_H_
-#define V8_ATOMIC_UTILS_H_
+#ifndef V8_BASE_ATOMIC_UTILS_H_
+#define V8_BASE_ATOMIC_UTILS_H_
#include <limits.h>
#include <type_traits>
@@ -419,4 +419,4 @@ class AtomicElement {
} // namespace base
} // namespace v8
-#endif // #define V8_ATOMIC_UTILS_H_
+#endif // V8_BASE_ATOMIC_UTILS_H_
diff --git a/deps/v8/src/base/atomicops_internals_portable.h b/deps/v8/src/base/atomicops_internals_portable.h
index 0779bfbd25..bd79558313 100644
--- a/deps/v8/src/base/atomicops_internals_portable.h
+++ b/deps/v8/src/base/atomicops_internals_portable.h
@@ -26,8 +26,8 @@
// needs to increment twice (which the compiler should be able to detect and
// optimize).
-#ifndef BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
-#define BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
#include <atomic>
diff --git a/deps/v8/src/base/atomicops_internals_std.h b/deps/v8/src/base/atomicops_internals_std.h
index f47152aa8c..4ce7b461e0 100644
--- a/deps/v8/src/base/atomicops_internals_std.h
+++ b/deps/v8/src/base/atomicops_internals_std.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef BASE_ATOMICOPS_INTERNALS_STD_H_
-#define BASE_ATOMICOPS_INTERNALS_STD_H_
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_STD_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_STD_H_
#include <atomic>
diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc
index 22e0511dc7..6ab0ffee29 100644
--- a/deps/v8/src/base/cpu.cc
+++ b/deps/v8/src/base/cpu.cc
@@ -335,7 +335,8 @@ CPU::CPU()
has_vfp3_(false),
has_vfp3_d32_(false),
is_fp64_mode_(false),
- has_non_stop_time_stamp_counter_(false) {
+ has_non_stop_time_stamp_counter_(false),
+ has_msa_(false) {
memcpy(vendor_, "Unknown", 8);
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
int cpu_info[4];
diff --git a/deps/v8/src/base/file-utils.h b/deps/v8/src/base/file-utils.h
index 271f0ffb05..afd9a1fc25 100644
--- a/deps/v8/src/base/file-utils.h
+++ b/deps/v8/src/base/file-utils.h
@@ -18,4 +18,4 @@ V8_BASE_EXPORT char* RelativePath(char** buffer, const char* exec_path,
} // namespace base
} // namespace v8
-#endif // V8_FILE_UTILS_H_
+#endif // V8_BASE_FILE_UTILS_H_
diff --git a/deps/v8/src/base/format-macros.h b/deps/v8/src/base/format-macros.h
index 5f5fe5df24..e2234684a8 100644
--- a/deps/v8/src/base/format-macros.h
+++ b/deps/v8/src/base/format-macros.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef BASE_FORMAT_MACROS_H_
-#define BASE_FORMAT_MACROS_H_
+#ifndef V8_BASE_FORMAT_MACROS_H_
+#define V8_BASE_FORMAT_MACROS_H_
// This file defines the format macros for some integer types.
@@ -94,4 +94,4 @@
#endif
-#endif // BASE_FORMAT_MACROS_H_
+#endif // V8_BASE_FORMAT_MACROS_H_
diff --git a/deps/v8/src/base/logging.cc b/deps/v8/src/base/logging.cc
index ad5349ac7e..e58fdba09f 100644
--- a/deps/v8/src/base/logging.cc
+++ b/deps/v8/src/base/logging.cc
@@ -119,16 +119,50 @@ DEFINE_CHECK_OP_IMPL(GT)
} // namespace base
} // namespace v8
+namespace {
+
+// FailureMessage is a stack allocated object which has a special marker field
+// at the start and at the end. This makes it possible to retrieve the embedded
+// message from the stack.
+//
+class FailureMessage {
+ public:
+ explicit FailureMessage(const char* format, va_list arguments) {
+ memset(&message_, 0, arraysize(message_));
+ v8::base::OS::VSNPrintF(&message_[0], arraysize(message_), format,
+ arguments);
+ }
+
+ static const uintptr_t kStartMarker = 0xdecade10;
+ static const uintptr_t kEndMarker = 0xdecade11;
+ static const int kMessageBufferSize = 1024;
+
+ uintptr_t start_marker_ = kStartMarker;
+ char message_[kMessageBufferSize];
+ uintptr_t end_marker_ = kEndMarker;
+};
+
+} // namespace
+
void V8_Fatal(const char* file, int line, const char* format, ...) {
+ va_list arguments;
+ va_start(arguments, format);
+ // Format the error message into a stack object for later retrieval by the
+ // crash processor.
+ FailureMessage message(format, arguments);
+ va_end(arguments);
+
fflush(stdout);
fflush(stderr);
v8::base::OS::PrintError("\n\n#\n# Fatal error in %s, line %d\n# ", file,
line);
- va_list arguments;
+
+ // Print the error message.
va_start(arguments, format);
v8::base::OS::VPrintError(format, arguments);
va_end(arguments);
- v8::base::OS::PrintError("\n#\n");
+ // Print the message object's address to force stack allocation.
+ v8::base::OS::PrintError("\n#\n#\n#\n#FailureMessage Object: %p", &message);
if (v8::base::g_print_stack_trace) v8::base::g_print_stack_trace();
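
For context on the FailureMessage object above: the fixed start and end markers make the formatted message findable in a raw stack dump by an out-of-process crash handler. A rough sketch of such a scan over a captured stack byte buffer (this scanner is illustrative only, not part of V8, and assumes the marker lands at a pointer-aligned offset):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <string>

// Scan a captured stack for the 0xdecade10 start marker and return the
// NUL-terminated message that follows it, if any.
std::string FindFailureMessage(const uint8_t* stack, size_t size) {
  const uintptr_t kStartMarker = 0xdecade10;
  const size_t kMessageBufferSize = 1024;
  for (size_t i = 0; i + sizeof(uintptr_t) + kMessageBufferSize <= size;
       i += sizeof(uintptr_t)) {
    uintptr_t value;
    memcpy(&value, stack + i, sizeof(value));
    if (value != kStartMarker) continue;
    const char* msg =
        reinterpret_cast<const char*>(stack + i + sizeof(uintptr_t));
    size_t len = 0;
    while (len < kMessageBufferSize && msg[len] != '\0') ++len;
    return std::string(msg, len);
  }
  return std::string();
}
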
diff --git a/deps/v8/src/base/logging.h b/deps/v8/src/base/logging.h
index 5275fdc6a6..a21bc5e423 100644
--- a/deps/v8/src/base/logging.h
+++ b/deps/v8/src/base/logging.h
@@ -44,11 +44,11 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int,
//
// We make sure CHECK et al. always evaluates their arguments, as
// doing CHECK(FunctionWithSideEffect()) is a common idiom.
-#define CHECK_WITH_MSG(condition, message) \
- do { \
- if (V8_UNLIKELY(!(condition))) { \
- V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", message); \
- } \
+#define CHECK_WITH_MSG(condition, message) \
+ do { \
+ if (V8_UNLIKELY(!(condition))) { \
+ FATAL("Check failed: %s.", message); \
+ } \
} while (0)
#define CHECK(condition) CHECK_WITH_MSG(condition, #condition)
@@ -70,7 +70,7 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int,
typename ::v8::base::pass_value_or_ref<decltype(lhs)>::type, \
typename ::v8::base::pass_value_or_ref<decltype(rhs)>::type>( \
(lhs), (rhs), #lhs " " #op " " #rhs)) { \
- V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", _msg->c_str()); \
+ FATAL("Check failed: %s.", _msg->c_str()); \
delete _msg; \
} \
} while (0)
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index a265408d91..9de42131a4 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -338,4 +338,4 @@ bool is_inbounds(float_t v) {
(kUpperBoundIsMax ? (v <= kUpperBound) : (v < kUpperBound));
}
-#endif // V8_BASE_MACROS_H_
+#endif // V8_BASE_MACROS_H_
diff --git a/deps/v8/src/base/optional.h b/deps/v8/src/base/optional.h
index a229745f84..ea32c403ac 100644
--- a/deps/v8/src/base/optional.h
+++ b/deps/v8/src/base/optional.h
@@ -125,7 +125,7 @@ class Optional {
constexpr Optional() {}
- explicit constexpr Optional(base::nullopt_t) {}
+ constexpr Optional(base::nullopt_t) {} // NOLINT(runtime/explicit)
Optional(const Optional& other) {
if (!other.storage_.is_null_) Init(other.value());
@@ -135,10 +135,12 @@ class Optional {
if (!other.storage_.is_null_) Init(std::move(other.value()));
}
- explicit constexpr Optional(const T& value) : storage_(value) {}
+ constexpr Optional(const T& value) // NOLINT(runtime/explicit)
+ : storage_(value) {}
// TODO(alshabalin): Can't use 'constexpr' with std::move until C++14.
- explicit Optional(T&& value) : storage_(std::move(value)) {}
+ Optional(T&& value) // NOLINT(runtime/explicit)
+ : storage_(std::move(value)) {}
// TODO(alshabalin): Can't use 'constexpr' with std::forward until C++14.
template <class... Args>
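Editorial note: the NOLINT changes above intentionally make Optional implicitly constructible from a value or from base::nullopt, matching std::optional semantics. A small usage sketch of what the implicit conversions enable (illustrative only, assuming base::nullopt is defined alongside base::nullopt_t as in the Chromium original):

    #include <vector>
    #include "src/base/optional.h"

    // With the implicit constructors, a plain value or nullopt can be returned
    // directly, without spelling out Optional<int>(...) at each return site.
    v8::base::Optional<int> Find(int needle, const std::vector<int>& haystack) {
      for (int value : haystack) {
        if (value == needle) return value;  // Implicit Optional(const T&).
      }
      return v8::base::nullopt;             // Implicit Optional(nullopt_t).
    }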
diff --git a/deps/v8/src/base/platform/platform-cygwin.cc b/deps/v8/src/base/platform/platform-cygwin.cc
index eabd53570f..0d4ec9a10d 100644
--- a/deps/v8/src/base/platform/platform-cygwin.cc
+++ b/deps/v8/src/base/platform/platform-cygwin.cc
@@ -139,7 +139,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
// base will be nullptr.
if (base != nullptr) break;
}
- DCHECK_EQ(base, aligned_base);
+ DCHECK_IMPLIES(base, base == aligned_base);
return reinterpret_cast<void*>(base);
}
diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc
index 38a7070e85..bba3f1baba 100644
--- a/deps/v8/src/base/platform/platform-fuchsia.cc
+++ b/deps/v8/src/base/platform/platform-fuchsia.cc
@@ -131,5 +131,21 @@ void OS::SignalCodeMovingGC() {
UNREACHABLE(); // TODO(scottmg): Port, https://crbug.com/731217.
}
+int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
+ const auto kNanosPerMicrosecond = 1000ULL;
+ const auto kMicrosPerSecond = 1000000ULL;
+ const zx_time_t nanos_since_thread_started = zx_clock_get(ZX_CLOCK_THREAD);
+
+ // First convert to microseconds, rounding up.
+ const uint64_t micros_since_thread_started =
+ (nanos_since_thread_started + kNanosPerMicrosecond - 1ULL) /
+ kNanosPerMicrosecond;
+
+ *secs = static_cast<uint32_t>(micros_since_thread_started / kMicrosPerSecond);
+ *usecs =
+ static_cast<uint32_t>(micros_since_thread_started % kMicrosPerSecond);
+ return 0;
+}
+
} // namespace base
} // namespace v8
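Editorial note: the Fuchsia GetUserTime() added above converts a nanosecond thread time to whole microseconds, rounding up, before splitting it into seconds and leftover microseconds. A small self-contained sketch of the same ceiling-divide arithmetic (illustrative, with a made-up input value; not taken from the patch):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t kNanosPerMicrosecond = 1000;
      const uint64_t kMicrosPerSecond = 1000000;
      uint64_t nanos = 1500000999;  // Example thread time: 1.500000999 s.
      // Ceiling division: any partial microsecond counts as a full one.
      uint64_t micros =
          (nanos + kNanosPerMicrosecond - 1) / kNanosPerMicrosecond;
      std::printf("secs=%llu usecs=%llu\n",
                  static_cast<unsigned long long>(micros / kMicrosPerSecond),
                  static_cast<unsigned long long>(micros % kMicrosPerSecond));
      // Prints: secs=1 usecs=500001
      return 0;
    }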
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index 5edbd7648b..f85f7fe942 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -18,7 +18,6 @@
#include <unistd.h>
#include <sys/mman.h>
-#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
@@ -55,6 +54,12 @@
#include <sys/prctl.h> // NOLINT, for prctl
#endif
+#if defined(V8_OS_FUCHSIA)
+#include <zircon/process.h>
+#else
+#include <sys/resource.h>
+#endif
+
#if !defined(_AIX) && !defined(V8_OS_FUCHSIA)
#include <sys/syscall.h>
#endif
@@ -245,6 +250,10 @@ void* OS::GetRandomMmapAddr() {
// Little-endian Linux: 48 bits of virtual addressing.
raw_addr &= uint64_t{0x3FFFFFFFF000};
#endif
+#elif V8_TARGET_ARCH_MIPS64
+  // We allocate code in 256 MB aligned segments because of optimizations that
+  // use the J instruction, which requires all code to fit in one such segment.
+ raw_addr &= uint64_t{0x3FFFE0000000};
#elif V8_TARGET_ARCH_S390X
// Linux on Z uses bits 22-32 for Region Indexing, which translates to 42 bits
// of virtual addressing. Truncate to 40 bits to allow kernel chance to
@@ -474,7 +483,7 @@ int OS::GetCurrentThreadId() {
#elif V8_OS_AIX
return static_cast<int>(thread_self());
#elif V8_OS_FUCHSIA
- return static_cast<int>(pthread_self());
+ return static_cast<int>(zx_thread_self());
#elif V8_OS_SOLARIS
return static_cast<int>(pthread_self());
#else
@@ -487,6 +496,7 @@ int OS::GetCurrentThreadId() {
// POSIX date/time support.
//
+#if !defined(V8_OS_FUCHSIA)
int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
struct rusage usage;
@@ -495,7 +505,7 @@ int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
*usecs = static_cast<uint32_t>(usage.ru_utime.tv_usec);
return 0;
}
-
+#endif
double OS::TimeCurrentMillis() {
return Time::Now().ToJsTime();
@@ -788,7 +798,7 @@ static void InitializeTlsBaseOffset() {
size_t buffer_size = kBufferSize;
int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
if (sysctl(ctl_name, 2, buffer, &buffer_size, nullptr, 0) != 0) {
- V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
+ FATAL("V8 failed to get kernel version");
}
// The buffer now contains a string of the form XX.YY.ZZ, where
// XX is the major kernel version component.
@@ -822,8 +832,7 @@ static void CheckFastTls(Thread::LocalStorageKey key) {
Thread::SetThreadLocal(key, expected);
void* actual = Thread::GetExistingThreadLocal(key);
if (expected != actual) {
- V8_Fatal(__FILE__, __LINE__,
- "V8 failed to initialize fast TLS on current kernel");
+ FATAL("V8 failed to initialize fast TLS on current kernel");
}
Thread::SetThreadLocal(key, nullptr);
}
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index 22580cc407..3f1a586840 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -829,15 +829,14 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
// base will be nullptr.
if (base != nullptr) break;
}
- DCHECK_EQ(base, aligned_base);
+ DCHECK_IMPLIES(base, base == aligned_base);
return reinterpret_cast<void*>(base);
}
// static
bool OS::Free(void* address, const size_t size) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % AllocatePageSize());
- // TODO(bbudge) Add DCHECK_EQ(0, size % AllocatePageSize()) when callers
- // pass the correct size on Windows.
+ DCHECK_EQ(0, size % AllocatePageSize());
USE(size);
return VirtualFree(address, 0, MEM_RELEASE) != 0;
}
@@ -872,6 +871,10 @@ void OS::Sleep(TimeDelta interval) {
void OS::Abort() {
+ // Before aborting, make sure to flush output buffers.
+ fflush(stdout);
+ fflush(stderr);
+
if (g_hard_abort) {
V8_IMMEDIATE_CRASH();
}
diff --git a/deps/v8/src/base/platform/time.cc b/deps/v8/src/base/platform/time.cc
index 1fcd7aecce..cf34af646c 100644
--- a/deps/v8/src/base/platform/time.cc
+++ b/deps/v8/src/base/platform/time.cc
@@ -143,41 +143,83 @@ TimeDelta TimeDelta::FromNanoseconds(int64_t nanoseconds) {
int TimeDelta::InDays() const {
+ if (IsMax()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<int>::max();
+ }
return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
}
-
int TimeDelta::InHours() const {
+ if (IsMax()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<int>::max();
+ }
return static_cast<int>(delta_ / Time::kMicrosecondsPerHour);
}
-
int TimeDelta::InMinutes() const {
+ if (IsMax()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<int>::max();
+ }
return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
}
-
double TimeDelta::InSecondsF() const {
+ if (IsMax()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<double>::infinity();
+ }
return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
}
-
int64_t TimeDelta::InSeconds() const {
+ if (IsMax()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<int64_t>::max();
+ }
return delta_ / Time::kMicrosecondsPerSecond;
}
-
double TimeDelta::InMillisecondsF() const {
+ if (IsMax()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<double>::infinity();
+ }
return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
}
-
int64_t TimeDelta::InMilliseconds() const {
+ if (IsMax()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<int64_t>::max();
+ }
return delta_ / Time::kMicrosecondsPerMillisecond;
}
+int64_t TimeDelta::InMillisecondsRoundedUp() const {
+ if (IsMax()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<int64_t>::max();
+ }
+ return (delta_ + Time::kMicrosecondsPerMillisecond - 1) /
+ Time::kMicrosecondsPerMillisecond;
+}
+
+int64_t TimeDelta::InMicroseconds() const {
+ if (IsMax()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<int64_t>::max();
+ }
+ return delta_;
+}
int64_t TimeDelta::InNanoseconds() const {
+ if (IsMax()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<int64_t>::max();
+ }
return delta_ * Time::kNanosecondsPerMicrosecond;
}
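Editorial note: every conversion above now checks IsMax() first so that a "maximum" delta keeps saturating instead of overflowing when scaled into another unit. A tiny standalone sketch of why the guard matters for the nanosecond conversion (hypothetical helper names; not part of the patch):

    #include <cstdint>
    #include <limits>

    int64_t InNanosecondsUnchecked(int64_t delta_us) {
      return delta_us * 1000;  // Signed overflow (UB) when delta_us is INT64_MAX.
    }

    int64_t InNanosecondsSaturating(int64_t delta_us) {
      if (delta_us == std::numeric_limits<int64_t>::max())
        return std::numeric_limits<int64_t>::max();  // Preserve max, as above.
      return delta_us * 1000;
    }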
@@ -415,6 +457,15 @@ struct timeval Time::ToTimeval() const {
#endif // V8_OS_WIN
+// static
+TimeTicks TimeTicks::HighResolutionNow() {
+  // A DCHECK of TimeTicks::IsHighResolution() was removed from here,
+  // as it turns out this path is used in the wild for logs and counters.
+  //
+  // TODO(hpayer) We may eventually want to split TimedHistograms based
+  // on low resolution clocks to avoid polluting metrics.
+ return TimeTicks::Now();
+}
Time Time::FromJsTime(double ms_since_epoch) {
// The epoch is a valid time, so this constructor doesn't interpret
@@ -447,165 +498,221 @@ std::ostream& operator<<(std::ostream& os, const Time& time) {
#if V8_OS_WIN
-class TickClock {
- public:
- virtual ~TickClock() {}
- virtual int64_t Now() = 0;
- virtual bool IsHighResolution() = 0;
+namespace {
+
+// We define a wrapper to adapt between the __stdcall and __cdecl call of the
+// mock function, and to avoid a static constructor. Assigning an import to a
+// function pointer directly would require setup code to fetch from the IAT.
+DWORD timeGetTimeWrapper() { return timeGetTime(); }
+
+DWORD (*g_tick_function)(void) = &timeGetTimeWrapper;
+
+// A structure holding the most significant bits of "last seen" and a
+// "rollover" counter.
+union LastTimeAndRolloversState {
+ // The state as a single 32-bit opaque value.
+ base::Atomic32 as_opaque_32;
+
+ // The state as usable values.
+ struct {
+ // The top 8-bits of the "last" time. This is enough to check for rollovers
+ // and the small bit-size means fewer CompareAndSwap operations to store
+ // changes in state, which in turn makes for fewer retries.
+ uint8_t last_8;
+ // A count of the number of detected rollovers. Using this as bits 47-32
+ // of the upper half of a 64-bit value results in a 48-bit tick counter.
+ // This extends the total rollover period from about 49 days to about 8800
+ // years while still allowing it to be stored with last_8 in a single
+ // 32-bit value.
+ uint16_t rollovers;
+ } as_values;
};
+base::Atomic32 g_last_time_and_rollovers = 0;
+static_assert(sizeof(LastTimeAndRolloversState) <=
+ sizeof(g_last_time_and_rollovers),
+ "LastTimeAndRolloversState does not fit in a single atomic word");
+
+// We use timeGetTime() to implement TimeTicks::Now(). This can be problematic
+// because it returns the number of milliseconds since Windows has started,
+// which will roll over the 32-bit value every ~49 days. We try to track
+// rollover ourselves, which works if TimeTicks::Now() is called at least every
+// 48.8 days (not 49 days because only changes in the top 8 bits get noticed).
+TimeTicks RolloverProtectedNow() {
+ LastTimeAndRolloversState state;
+ DWORD now; // DWORD is always unsigned 32 bits.
+
+ while (true) {
+ // Fetch the "now" and "last" tick values, updating "last" with "now" and
+ // incrementing the "rollovers" counter if the tick-value has wrapped back
+ // around. Atomic operations ensure that both "last" and "rollovers" are
+ // always updated together.
+ int32_t original = base::Acquire_Load(&g_last_time_and_rollovers);
+ state.as_opaque_32 = original;
+ now = g_tick_function();
+ uint8_t now_8 = static_cast<uint8_t>(now >> 24);
+ if (now_8 < state.as_values.last_8) ++state.as_values.rollovers;
+ state.as_values.last_8 = now_8;
+
+ // If the state hasn't changed, exit the loop.
+ if (state.as_opaque_32 == original) break;
+
+ // Save the changed state. If the existing value is unchanged from the
+ // original, exit the loop.
+ int32_t check = base::Release_CompareAndSwap(&g_last_time_and_rollovers,
+ original, state.as_opaque_32);
+ if (check == original) break;
+
+ // Another thread has done something in between so retry from the top.
+ }
+ return TimeTicks() +
+ TimeDelta::FromMilliseconds(
+ now + (static_cast<uint64_t>(state.as_values.rollovers) << 32));
+}
-// Overview of time counters:
+// Discussion of tick counter options on Windows:
+//
// (1) CPU cycle counter. (Retrieved via RDTSC)
// The CPU counter provides the highest resolution time stamp and is the least
-// expensive to retrieve. However, the CPU counter is unreliable and should not
-// be used in production. Its biggest issue is that it is per processor and it
-// is not synchronized between processors. Also, on some computers, the counters
-// will change frequency due to thermal and power changes, and stop in some
-// states.
+// expensive to retrieve. However, on older CPUs, two issues can affect its
+// reliability: First it is maintained per processor and not synchronized
+// between processors. Also, the counters will change frequency due to thermal
+// and power changes, and stop in some states.
//
// (2) QueryPerformanceCounter (QPC). The QPC counter provides a high-
-// resolution (100 nanoseconds) time stamp but is comparatively more expensive
-// to retrieve. What QueryPerformanceCounter actually does is up to the HAL.
-// (with some help from ACPI).
-// According to http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx
-// in the worst case, it gets the counter from the rollover interrupt on the
+// resolution (<1 microsecond) time stamp. On most hardware running today, it
+// auto-detects and uses the constant-rate RDTSC counter to provide extremely
+// efficient and reliable time stamps.
+//
+// On older CPUs where RDTSC is unreliable, it falls back to using more
+// expensive (20X to 40X more costly) alternate clocks, such as HPET or the ACPI
+// PM timer, and can involve system calls; and all this is up to the HAL (with
+// some help from ACPI). According to
+// http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx, in the
+// worst case, it gets the counter from the rollover interrupt on the
// programmable interrupt timer. In best cases, the HAL may conclude that the
// RDTSC counter runs at a constant frequency, then it uses that instead. On
// multiprocessor machines, it will try to verify the values returned from
// RDTSC on each processor are consistent with each other, and apply a handful
// of workarounds for known buggy hardware. In other words, QPC is supposed to
-// give consistent result on a multiprocessor computer, but it is unreliable in
-// reality due to bugs in BIOS or HAL on some, especially old computers.
-// With recent updates on HAL and newer BIOS, QPC is getting more reliable but
-// it should be used with caution.
+// give consistent results on a multiprocessor computer, but for older CPUs it
+// can be unreliable due to bugs in BIOS or HAL.
//
-// (3) System time. The system time provides a low-resolution (typically 10ms
-// to 55 milliseconds) time stamp but is comparatively less expensive to
-// retrieve and more reliable.
-class HighResolutionTickClock final : public TickClock {
- public:
- explicit HighResolutionTickClock(int64_t ticks_per_second)
- : ticks_per_second_(ticks_per_second) {
- DCHECK_LT(0, ticks_per_second);
+// (3) System time. The system time provides a low-resolution (from ~1 to ~15.6
+// milliseconds) time stamp but is comparatively less expensive to retrieve and
+// more reliable. Time::EnableHighResolutionTimer() and
+// Time::ActivateHighResolutionTimer() can be called to alter the resolution of
+// this timer; and also other Windows applications can alter it, affecting this
+// one.
+
+TimeTicks InitialTimeTicksNowFunction();
+
+// See "threading notes" in InitializeTimeTicksNowFunctionPointer() for details
+// on how concurrent reads/writes to these globals have been made safe.
+using TimeTicksNowFunction = decltype(&TimeTicks::Now);
+TimeTicksNowFunction g_time_ticks_now_function = &InitialTimeTicksNowFunction;
+int64_t g_qpc_ticks_per_second = 0;
+
+// As of January 2015, use of <atomic> is forbidden in Chromium code. This is
+// what std::atomic_thread_fence does on Windows on all Intel architectures when
+// the memory_order argument is anything but std::memory_order_seq_cst:
+#define ATOMIC_THREAD_FENCE(memory_order) _ReadWriteBarrier();
+
+TimeDelta QPCValueToTimeDelta(LONGLONG qpc_value) {
+ // Ensure that the assignment to |g_qpc_ticks_per_second|, made in
+  // InitializeTimeTicksNowFunctionPointer(), has happened by this point.
+ ATOMIC_THREAD_FENCE(memory_order_acquire);
+
+ DCHECK_GT(g_qpc_ticks_per_second, 0);
+
+ // If the QPC Value is below the overflow threshold, we proceed with
+ // simple multiply and divide.
+ if (qpc_value < TimeTicks::kQPCOverflowThreshold) {
+ return TimeDelta::FromMicroseconds(
+ qpc_value * TimeTicks::kMicrosecondsPerSecond / g_qpc_ticks_per_second);
}
- virtual ~HighResolutionTickClock() {}
-
- int64_t Now() override {
- uint64_t now = QPCNowRaw();
-
- // Intentionally calculate microseconds in a round about manner to avoid
- // overflow and precision issues. Think twice before simplifying!
- int64_t whole_seconds = now / ticks_per_second_;
- int64_t leftover_ticks = now % ticks_per_second_;
- int64_t ticks = (whole_seconds * Time::kMicrosecondsPerSecond) +
- ((leftover_ticks * Time::kMicrosecondsPerSecond) / ticks_per_second_);
-
- // Make sure we never return 0 here, so that TimeTicks::HighResolutionNow()
- // will never return 0.
- return ticks + 1;
- }
-
- bool IsHighResolution() override { return true; }
+  // Otherwise, calculate microseconds in a roundabout manner to avoid
+ // overflow and precision issues.
+ int64_t whole_seconds = qpc_value / g_qpc_ticks_per_second;
+ int64_t leftover_ticks = qpc_value - (whole_seconds * g_qpc_ticks_per_second);
+ return TimeDelta::FromMicroseconds(
+ (whole_seconds * TimeTicks::kMicrosecondsPerSecond) +
+ ((leftover_ticks * TimeTicks::kMicrosecondsPerSecond) /
+ g_qpc_ticks_per_second));
+}
- private:
- int64_t ticks_per_second_;
-};
+TimeTicks QPCNow() { return TimeTicks() + QPCValueToTimeDelta(QPCNowRaw()); }
+bool IsBuggyAthlon(const CPU& cpu) {
+ // On Athlon X2 CPUs (e.g. model 15) QueryPerformanceCounter is unreliable.
+ return strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15;
+}
-class RolloverProtectedTickClock final : public TickClock {
- public:
- RolloverProtectedTickClock() : rollover_(0) {}
- virtual ~RolloverProtectedTickClock() {}
-
- int64_t Now() override {
- // We use timeGetTime() to implement TimeTicks::Now(), which rolls over
- // every ~49.7 days. We try to track rollover ourselves, which works if
- // TimeTicks::Now() is called at least every 24 days.
- // Note that we do not use GetTickCount() here, since timeGetTime() gives
- // more predictable delta values, as described here:
- // http://blogs.msdn.com/b/larryosterman/archive/2009/09/02/what-s-the-difference-between-gettickcount-and-timegettime.aspx
- // timeGetTime() provides 1ms granularity when combined with
- // timeBeginPeriod(). If the host application for V8 wants fast timers, it
- // can use timeBeginPeriod() to increase the resolution.
- // We use a lock-free version because the sampler thread calls it
- // while having the rest of the world stopped, that could cause a deadlock.
- base::Atomic32 rollover = base::Acquire_Load(&rollover_);
- uint32_t now = static_cast<uint32_t>(timeGetTime());
- if ((now >> 31) != static_cast<uint32_t>(rollover & 1)) {
- base::Release_CompareAndSwap(&rollover_, rollover, rollover + 1);
- ++rollover;
- }
- uint64_t ms = (static_cast<uint64_t>(rollover) << 31) | now;
- return static_cast<int64_t>(ms * Time::kMicrosecondsPerMillisecond);
+void InitializeTimeTicksNowFunctionPointer() {
+ LARGE_INTEGER ticks_per_sec = {};
+ if (!QueryPerformanceFrequency(&ticks_per_sec)) ticks_per_sec.QuadPart = 0;
+
+ // If Windows cannot provide a QPC implementation, TimeTicks::Now() must use
+ // the low-resolution clock.
+ //
+ // If the QPC implementation is expensive and/or unreliable, TimeTicks::Now()
+ // will still use the low-resolution clock. A CPU lacking a non-stop time
+ // counter will cause Windows to provide an alternate QPC implementation that
+ // works, but is expensive to use. Certain Athlon CPUs are known to make the
+ // QPC implementation unreliable.
+ //
+ // Otherwise, Now uses the high-resolution QPC clock. As of 21 August 2015,
+ // ~72% of users fall within this category.
+ TimeTicksNowFunction now_function;
+ CPU cpu;
+ if (ticks_per_sec.QuadPart <= 0 || !cpu.has_non_stop_time_stamp_counter() ||
+ IsBuggyAthlon(cpu)) {
+ now_function = &RolloverProtectedNow;
+ } else {
+ now_function = &QPCNow;
}
- bool IsHighResolution() override { return false; }
-
- private:
- base::Atomic32 rollover_;
-};
-
-
-static LazyStaticInstance<RolloverProtectedTickClock,
- DefaultConstructTrait<RolloverProtectedTickClock>,
- ThreadSafeInitOnceTrait>::type tick_clock =
- LAZY_STATIC_INSTANCE_INITIALIZER;
-
-
-struct CreateHighResTickClockTrait {
- static TickClock* Create() {
- // Check if the installed hardware supports a high-resolution performance
- // counter, and if not fallback to the low-resolution tick clock.
- LARGE_INTEGER ticks_per_second;
- if (!QueryPerformanceFrequency(&ticks_per_second)) {
- return tick_clock.Pointer();
- }
-
- // If QPC not reliable, fallback to low-resolution tick clock.
- if (IsQPCReliable()) {
- return tick_clock.Pointer();
- }
+ // Threading note 1: In an unlikely race condition, it's possible for two or
+  // more threads to enter InitializeTimeTicksNowFunctionPointer() in parallel.
+  // This is not a problem since all threads should end up writing the same values
+ // to the global variables.
+ //
+ // Threading note 2: A release fence is placed here to ensure, from the
+ // perspective of other threads using the function pointers, that the
+ // assignment to |g_qpc_ticks_per_second| happens before the function pointers
+ // are changed.
+ g_qpc_ticks_per_second = ticks_per_sec.QuadPart;
+ ATOMIC_THREAD_FENCE(memory_order_release);
+ g_time_ticks_now_function = now_function;
+}
- return new HighResolutionTickClock(ticks_per_second.QuadPart);
- }
-};
+TimeTicks InitialTimeTicksNowFunction() {
+ InitializeTimeTicksNowFunctionPointer();
+ return g_time_ticks_now_function();
+}
+#undef ATOMIC_THREAD_FENCE
-static LazyDynamicInstance<TickClock, CreateHighResTickClockTrait,
- ThreadSafeInitOnceTrait>::type high_res_tick_clock =
- LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+} // namespace
// static
TimeTicks TimeTicks::Now() {
// Make sure we never return 0 here.
- TimeTicks ticks(tick_clock.Pointer()->Now());
+ TimeTicks ticks(g_time_ticks_now_function());
DCHECK(!ticks.IsNull());
return ticks;
}
// static
-TimeTicks TimeTicks::HighResolutionNow() {
- // Make sure we never return 0 here.
- TimeTicks ticks(high_res_tick_clock.Pointer()->Now());
- DCHECK(!ticks.IsNull());
- return ticks;
-}
-
-
-// static
-bool TimeTicks::IsHighResolutionClockWorking() {
- return high_res_tick_clock.Pointer()->IsHighResolution();
+bool TimeTicks::IsHighResolution() {
+ if (g_time_ticks_now_function == &InitialTimeTicksNowFunction)
+ InitializeTimeTicksNowFunctionPointer();
+ return g_time_ticks_now_function == &QPCNow;
}
#else // V8_OS_WIN
TimeTicks TimeTicks::Now() {
- return HighResolutionNow();
-}
-
-
-TimeTicks TimeTicks::HighResolutionNow() {
int64_t ticks;
#if V8_OS_MACOSX
static struct mach_timebase_info info;
@@ -627,11 +734,8 @@ TimeTicks TimeTicks::HighResolutionNow() {
return TimeTicks(ticks + 1);
}
-
// static
-bool TimeTicks::IsHighResolutionClockWorking() {
- return true;
-}
+bool TimeTicks::IsHighResolution() { return true; }
#endif // V8_OS_WIN
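Editorial note: the new RolloverProtectedNow() above packs the top 8 bits of the last observed timeGetTime() value together with a 16-bit rollover counter into one atomic word; the rollover count then becomes bits 47..32 of the returned tick value, extending the 32-bit millisecond counter to 48 bits. A small sketch of just that reconstruction step under those assumptions (illustrative, hypothetical helper name):

    #include <cstdint>

    // Given the 32-bit timeGetTime() value and how many 2^32 ms wraps have been
    // observed, rebuild the 48-bit millisecond counter the way
    // RolloverProtectedNow() does before converting it to a TimeDelta.
    uint64_t Reconstruct48BitMillis(uint32_t now_ms, uint16_t rollovers) {
      return static_cast<uint64_t>(now_ms) +
             (static_cast<uint64_t>(rollovers) << 32);
    }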
diff --git a/deps/v8/src/base/platform/time.h b/deps/v8/src/base/platform/time.h
index 25dee1c419..161092ad8b 100644
--- a/deps/v8/src/base/platform/time.h
+++ b/deps/v8/src/base/platform/time.h
@@ -5,6 +5,8 @@
#ifndef V8_BASE_PLATFORM_TIME_H_
#define V8_BASE_PLATFORM_TIME_H_
+#include <stdint.h>
+
#include <ctime>
#include <iosfwd>
#include <limits>
@@ -45,7 +47,7 @@ class TimeBase;
class V8_BASE_EXPORT TimeDelta final {
public:
- TimeDelta() : delta_(0) {}
+ constexpr TimeDelta() : delta_(0) {}
// Converts units of time to TimeDeltas.
static TimeDelta FromDays(int days);
@@ -58,6 +60,27 @@ class V8_BASE_EXPORT TimeDelta final {
}
static TimeDelta FromNanoseconds(int64_t nanoseconds);
+ // Returns the maximum time delta, which should be greater than any reasonable
+ // time delta we might compare it to. Adding or subtracting the maximum time
+ // delta to a time or another time delta has an undefined result.
+ static constexpr TimeDelta Max();
+
+  // Returns the minimum time delta, which should be less than any
+ // reasonable time delta we might compare it to. Adding or subtracting the
+ // minimum time delta to a time or another time delta has an undefined result.
+ static constexpr TimeDelta Min();
+
+ // Returns true if the time delta is zero.
+ constexpr bool IsZero() const { return delta_ == 0; }
+
+ // Returns true if the time delta is the maximum/minimum time delta.
+ constexpr bool IsMax() const {
+ return delta_ == std::numeric_limits<int64_t>::max();
+ }
+ constexpr bool IsMin() const {
+ return delta_ == std::numeric_limits<int64_t>::min();
+ }
+
// Returns the time delta in some unit. The F versions return a floating
// point value, the "regular" versions return a rounded-down value.
//
@@ -71,7 +94,7 @@ class V8_BASE_EXPORT TimeDelta final {
double InMillisecondsF() const;
int64_t InMilliseconds() const;
int64_t InMillisecondsRoundedUp() const;
- int64_t InMicroseconds() const { return delta_; }
+ int64_t InMicroseconds() const;
int64_t InNanoseconds() const;
// Converts to/from Mach time specs.
@@ -103,9 +126,7 @@ class V8_BASE_EXPORT TimeDelta final {
delta_ -= other.delta_;
return *this;
}
- TimeDelta operator-() const {
- return TimeDelta(-delta_);
- }
+ constexpr TimeDelta operator-() const { return TimeDelta(-delta_); }
double TimesOf(const TimeDelta& other) const {
return static_cast<double>(delta_) / static_cast<double>(other.delta_);
@@ -135,22 +156,22 @@ class V8_BASE_EXPORT TimeDelta final {
}
// Comparison operators.
- bool operator==(const TimeDelta& other) const {
+ constexpr bool operator==(const TimeDelta& other) const {
return delta_ == other.delta_;
}
- bool operator!=(const TimeDelta& other) const {
+ constexpr bool operator!=(const TimeDelta& other) const {
return delta_ != other.delta_;
}
- bool operator<(const TimeDelta& other) const {
+ constexpr bool operator<(const TimeDelta& other) const {
return delta_ < other.delta_;
}
- bool operator<=(const TimeDelta& other) const {
+ constexpr bool operator<=(const TimeDelta& other) const {
return delta_ <= other.delta_;
}
- bool operator>(const TimeDelta& other) const {
+ constexpr bool operator>(const TimeDelta& other) const {
return delta_ > other.delta_;
}
- bool operator>=(const TimeDelta& other) const {
+ constexpr bool operator>=(const TimeDelta& other) const {
return delta_ >= other.delta_;
}
@@ -159,12 +180,21 @@ class V8_BASE_EXPORT TimeDelta final {
// Constructs a delta given the duration in microseconds. This is private
// to avoid confusion by callers with an integer constructor. Use
// FromSeconds, FromMilliseconds, etc. instead.
- explicit TimeDelta(int64_t delta) : delta_(delta) {}
+ explicit constexpr TimeDelta(int64_t delta) : delta_(delta) {}
// Delta in microseconds.
int64_t delta_;
};
+// static
+constexpr TimeDelta TimeDelta::Max() {
+ return TimeDelta(std::numeric_limits<int64_t>::max());
+}
+
+// static
+constexpr TimeDelta TimeDelta::Min() {
+ return TimeDelta(std::numeric_limits<int64_t>::min());
+}
namespace time_internal {
@@ -177,33 +207,52 @@ namespace time_internal {
template<class TimeClass>
class TimeBase {
public:
- static const int64_t kHoursPerDay = 24;
- static const int64_t kMillisecondsPerSecond = 1000;
- static const int64_t kMillisecondsPerDay =
+ static constexpr int64_t kHoursPerDay = 24;
+ static constexpr int64_t kMillisecondsPerSecond = 1000;
+ static constexpr int64_t kMillisecondsPerDay =
kMillisecondsPerSecond * 60 * 60 * kHoursPerDay;
- static const int64_t kMicrosecondsPerMillisecond = 1000;
- static const int64_t kMicrosecondsPerSecond =
+ static constexpr int64_t kMicrosecondsPerMillisecond = 1000;
+ static constexpr int64_t kMicrosecondsPerSecond =
kMicrosecondsPerMillisecond * kMillisecondsPerSecond;
- static const int64_t kMicrosecondsPerMinute = kMicrosecondsPerSecond * 60;
- static const int64_t kMicrosecondsPerHour = kMicrosecondsPerMinute * 60;
- static const int64_t kMicrosecondsPerDay =
+ static constexpr int64_t kMicrosecondsPerMinute = kMicrosecondsPerSecond * 60;
+ static constexpr int64_t kMicrosecondsPerHour = kMicrosecondsPerMinute * 60;
+ static constexpr int64_t kMicrosecondsPerDay =
kMicrosecondsPerHour * kHoursPerDay;
- static const int64_t kMicrosecondsPerWeek = kMicrosecondsPerDay * 7;
- static const int64_t kNanosecondsPerMicrosecond = 1000;
- static const int64_t kNanosecondsPerSecond =
+ static constexpr int64_t kMicrosecondsPerWeek = kMicrosecondsPerDay * 7;
+ static constexpr int64_t kNanosecondsPerMicrosecond = 1000;
+ static constexpr int64_t kNanosecondsPerSecond =
kNanosecondsPerMicrosecond * kMicrosecondsPerSecond;
+#if V8_OS_WIN
+  // To avoid overflow in QPC-to-microseconds calculations (we multiply by
+  // kMicrosecondsPerSecond), the QPC value should not exceed
+ // (2^63 - 1) / 1E6. If it exceeds that threshold, we divide then multiply.
+ static constexpr int64_t kQPCOverflowThreshold = INT64_C(0x8637BD05AF7);
+#endif
+
// Returns true if this object has not been initialized.
//
// Warning: Be careful when writing code that performs math on time values,
// since it's possible to produce a valid "zero" result that should not be
// interpreted as a "null" value.
- bool IsNull() const {
- return us_ == 0;
+ constexpr bool IsNull() const { return us_ == 0; }
+
+ // Returns the maximum/minimum times, which should be greater/less than any
+ // reasonable time with which we might compare it.
+ static TimeClass Max() {
+ return TimeClass(std::numeric_limits<int64_t>::max());
+ }
+ static TimeClass Min() {
+ return TimeClass(std::numeric_limits<int64_t>::min());
}
- // Returns true if this object represents the maximum time.
- bool IsMax() const { return us_ == std::numeric_limits<int64_t>::max(); }
+ // Returns true if this object represents the maximum/minimum time.
+ constexpr bool IsMax() const {
+ return us_ == std::numeric_limits<int64_t>::max();
+ }
+ constexpr bool IsMin() const {
+ return us_ == std::numeric_limits<int64_t>::min();
+ }
// For serializing only. Use FromInternalValue() to reconstitute. Please don't
// use this and do arithmetic on it, as it is more error prone than using the
@@ -263,7 +312,7 @@ class TimeBase {
static TimeClass FromInternalValue(int64_t us) { return TimeClass(us); }
protected:
- explicit TimeBase(int64_t us) : us_(us) {}
+ explicit constexpr TimeBase(int64_t us) : us_(us) {}
// Time value in a microsecond timebase.
int64_t us_;
@@ -281,7 +330,7 @@ class TimeBase {
class V8_BASE_EXPORT Time final : public time_internal::TimeBase<Time> {
public:
// Contains the nullptr time. Use Time::Now() to get the current time.
- Time() : TimeBase(0) {}
+ constexpr Time() : TimeBase(0) {}
// Returns the current time. Watch out, the system might adjust its clock
// in which case time will actually go backwards. We don't guarantee that
@@ -297,10 +346,6 @@ class V8_BASE_EXPORT Time final : public time_internal::TimeBase<Time> {
// Returns the time for epoch in Unix-like system (Jan 1, 1970).
static Time UnixEpoch() { return Time(0); }
- // Returns the maximum time, which should be greater than any reasonable time
- // with which we might compare it.
- static Time Max() { return Time(std::numeric_limits<int64_t>::max()); }
-
// Converts to/from POSIX time specs.
static Time FromTimespec(struct timespec ts);
struct timespec ToTimespec() const;
@@ -320,7 +365,7 @@ class V8_BASE_EXPORT Time final : public time_internal::TimeBase<Time> {
private:
friend class time_internal::TimeBase<Time>;
- explicit Time(int64_t us) : TimeBase(us) {}
+ explicit constexpr Time(int64_t us) : TimeBase(us) {}
};
V8_BASE_EXPORT std::ostream& operator<<(std::ostream&, const Time&);
@@ -343,30 +388,29 @@ inline Time operator+(const TimeDelta& delta, const Time& time) {
class V8_BASE_EXPORT TimeTicks final
: public time_internal::TimeBase<TimeTicks> {
public:
- TimeTicks() : TimeBase(0) {}
+ constexpr TimeTicks() : TimeBase(0) {}
- // Platform-dependent tick count representing "right now."
- // The resolution of this clock is ~1-15ms. Resolution varies depending
- // on hardware/operating system configuration.
+ // Platform-dependent tick count representing "right now." When
+ // IsHighResolution() returns false, the resolution of the clock could be as
+ // coarse as ~15.6ms. Otherwise, the resolution should be no worse than one
+ // microsecond.
// This method never returns a null TimeTicks.
static TimeTicks Now();
- // Returns a platform-dependent high-resolution tick count. Implementation
- // is hardware dependent and may or may not return sub-millisecond
- // resolution. THIS CALL IS GENERALLY MUCH MORE EXPENSIVE THAN Now() AND
- // SHOULD ONLY BE USED WHEN IT IS REALLY NEEDED.
- // This method never returns a null TimeTicks.
+ // This is equivalent to Now() but DCHECKs that IsHighResolution(). Useful for
+ // test frameworks that rely on high resolution clocks (in practice all
+ // platforms but low-end Windows devices have high resolution clocks).
static TimeTicks HighResolutionNow();
// Returns true if the high-resolution clock is working on this system.
- static bool IsHighResolutionClockWorking();
+ static bool IsHighResolution();
private:
friend class time_internal::TimeBase<TimeTicks>;
// Please use Now() to create a new object. This is for internal use
// and testing. Ticks are in microseconds.
- explicit TimeTicks(int64_t ticks) : TimeBase(ticks) {}
+ explicit constexpr TimeTicks(int64_t ticks) : TimeBase(ticks) {}
};
inline TimeTicks operator+(const TimeDelta& delta, const TimeTicks& ticks) {
@@ -381,7 +425,7 @@ inline TimeTicks operator+(const TimeDelta& delta, const TimeTicks& ticks) {
class V8_BASE_EXPORT ThreadTicks final
: public time_internal::TimeBase<ThreadTicks> {
public:
- ThreadTicks() : TimeBase(0) {}
+ constexpr ThreadTicks() : TimeBase(0) {}
// Returns true if ThreadTicks::Now() is supported on this system.
static bool IsSupported();
@@ -416,7 +460,7 @@ class V8_BASE_EXPORT ThreadTicks final
// Please use Now() or GetForThread() to create a new object. This is for
// internal use and testing. Ticks are in microseconds.
- explicit ThreadTicks(int64_t ticks) : TimeBase(ticks) {}
+ explicit constexpr ThreadTicks(int64_t ticks) : TimeBase(ticks) {}
#if V8_OS_WIN
// Returns the frequency of the TSC in ticks per second, or 0 if it hasn't
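Editorial note: with the constructors, Max()/Min(), and comparison operators now constexpr, sentinel deltas and their comparisons can be evaluated at compile time. A brief usage sketch under that assumption (not from the patch; the variable names are made up):

    #include "src/base/platform/time.h"

    // All of these are compile-time constants after this change.
    constexpr v8::base::TimeDelta kNoTimeout = v8::base::TimeDelta::Max();
    constexpr v8::base::TimeDelta kZeroDelay;  // Default ctor is constexpr.
    static_assert(kNoTimeout.IsMax(), "Max() must saturate");
    static_assert(kZeroDelay < kNoTimeout, "comparisons usable in constexpr");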
diff --git a/deps/v8/src/base/qnx-math.h b/deps/v8/src/base/qnx-math.h
index 6ff18f8d12..1503c164fa 100644
--- a/deps/v8/src/base/qnx-math.h
+++ b/deps/v8/src/base/qnx-math.h
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#ifndef V8_BASE_QNX_MATH_H_
-#define V8_QBASE_NX_MATH_H_
+#define V8_BASE_QNX_MATH_H_
#include <cmath>
diff --git a/deps/v8/src/base/sys-info.cc b/deps/v8/src/base/sys-info.cc
index 28ff780dd3..3d47ebe8f7 100644
--- a/deps/v8/src/base/sys-info.cc
+++ b/deps/v8/src/base/sys-info.cc
@@ -5,11 +5,13 @@
#include "src/base/sys-info.h"
#if V8_OS_POSIX
-#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
+#if !V8_OS_FUCHSIA
+#include <sys/resource.h>
+#endif
#endif
#if V8_OS_BSD
@@ -101,7 +103,7 @@ int64_t SysInfo::AmountOfPhysicalMemory() {
// static
int64_t SysInfo::AmountOfVirtualMemory() {
-#if V8_OS_WIN
+#if V8_OS_WIN || V8_OS_FUCHSIA
return 0;
#elif V8_OS_POSIX
struct rlimit rlim;
diff --git a/deps/v8/src/base/template-utils.h b/deps/v8/src/base/template-utils.h
index 18850695cb..18b50fe70c 100644
--- a/deps/v8/src/base/template-utils.h
+++ b/deps/v8/src/base/template-utils.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_BASE_TEMPLATE_UTILS_H
-#define V8_BASE_TEMPLATE_UTILS_H
+#ifndef V8_BASE_TEMPLATE_UTILS_H_
+#define V8_BASE_TEMPLATE_UTILS_H_
#include <array>
#include <memory>
@@ -131,4 +131,4 @@ constexpr auto fold(Func func, Ts&&... more) ->
} // namespace base
} // namespace v8
-#endif // V8_BASE_TEMPLATE_UTILS_H
+#endif // V8_BASE_TEMPLATE_UTILS_H_
diff --git a/deps/v8/src/base/v8-fallthrough.h b/deps/v8/src/base/v8-fallthrough.h
new file mode 100644
index 0000000000..f61238de06
--- /dev/null
+++ b/deps/v8/src/base/v8-fallthrough.h
@@ -0,0 +1,21 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_V8_FALLTHROUGH_H_
+#define V8_BASE_V8_FALLTHROUGH_H_
+
+// When clang suggests inserting [[clang::fallthrough]], it first checks if
+// it knows of a macro expanding to it, and if so suggests inserting the
+// macro. This means that this macro must be used only in code internal
+// to v8, so that v8's user code doesn't end up getting suggestions
+// for V8_FALLTHROUGH instead of the user-specific fallthrough macro.
+// So do not include this header in any of v8's public headers -- only
+// use it in src/, not in include/.
+#if defined(__clang__)
+#define V8_FALLTHROUGH [[clang::fallthrough]] // NOLINT(whitespace/braces)
+#else
+#define V8_FALLTHROUGH
+#endif
+
+#endif // V8_BASE_V8_FALLTHROUGH_H_
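Editorial note: V8_FALLTHROUGH, added above, annotates intentional case fall-through so clang's implicit-fallthrough warning stays quiet inside src/. A minimal usage sketch (illustrative; the switch itself is made up):

    #include "src/base/v8-fallthrough.h"

    int CategoryOf(int code) {
      switch (code) {
        case 0:
        case 1:
          return 0;
        case 2:
          code += 10;      // Deliberately continue into the next case.
          V8_FALLTHROUGH;
        case 3:
          return code;
        default:
          return -1;
      }
    }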
diff --git a/deps/v8/src/bit-vector.h b/deps/v8/src/bit-vector.h
index 71d69b20c2..ef87600753 100644
--- a/deps/v8/src/bit-vector.h
+++ b/deps/v8/src/bit-vector.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_DATAFLOW_H_
-#define V8_DATAFLOW_H_
+#ifndef V8_BIT_VECTOR_H_
+#define V8_BIT_VECTOR_H_
#include "src/allocation.h"
#include "src/zone/zone.h"
@@ -370,4 +370,4 @@ class GrowableBitVector BASE_EMBEDDED {
} // namespace internal
} // namespace v8
-#endif // V8_DATAFLOW_H_
+#endif // V8_BIT_VECTOR_H_
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 2bc833fe29..ff4beae7fd 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -369,15 +369,15 @@ V8_NOINLINE Handle<SharedFunctionInfo> SimpleCreateSharedFunctionInfo(
}
// Construct case.
-V8_NOINLINE Handle<SharedFunctionInfo> SimpleCreateSharedFunctionInfo(
- Isolate* isolate, Builtins::Name builtin_id, Handle<String> name,
- Handle<String> instance_class_name, int len) {
+V8_NOINLINE Handle<SharedFunctionInfo>
+SimpleCreateConstructorSharedFunctionInfo(Isolate* isolate,
+ Builtins::Name builtin_id,
+ Handle<String> name, int len) {
Handle<Code> code = isolate->builtins()->builtin_handle(builtin_id);
const bool kIsConstructor = true;
Handle<SharedFunctionInfo> shared = isolate->factory()->NewSharedFunctionInfo(
name, code, kIsConstructor, kNormalFunction, builtin_id);
shared->SetConstructStub(*BUILTIN_CODE(isolate, JSBuiltinsConstructStub));
- shared->set_instance_class_name(*instance_class_name);
shared->set_internal_formal_parameter_count(len);
shared->set_length(len);
return shared;
@@ -389,9 +389,6 @@ V8_NOINLINE void InstallFunction(Handle<JSObject> target,
Handle<String> function_name,
PropertyAttributes attributes = DONT_ENUM) {
JSObject::AddProperty(target, property_name, function, attributes);
- if (target->IsJSGlobalObject()) {
- function->shared()->set_instance_class_name(*function_name);
- }
}
V8_NOINLINE void InstallFunction(Handle<JSObject> target,
@@ -1279,8 +1276,6 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
isolate(), global_constructor, factory()->the_hole_value(),
ApiNatives::GlobalProxyType);
}
- Handle<String> global_name = factory()->global_string();
- global_proxy_function->shared()->set_instance_class_name(*global_name);
global_proxy_function->initial_map()->set_is_access_check_needed(true);
global_proxy_function->initial_map()->set_has_hidden_prototype(true);
global_proxy_function->initial_map()->set_may_have_interesting_symbols(true);
@@ -1347,7 +1342,6 @@ static void InstallError(Isolate* isolate, Handle<JSObject> global,
Handle<JSFunction> error_fun = InstallFunction(
global, name, JS_ERROR_TYPE, JSObject::kHeaderSize, 0,
factory->the_hole_value(), Builtins::kErrorConstructor, DONT_ENUM);
- error_fun->shared()->set_instance_class_name(*factory->Error_string());
error_fun->shared()->DontAdaptArguments();
error_fun->shared()->SetConstructStub(
*BUILTIN_CODE(isolate, ErrorConstructor));
@@ -1508,9 +1502,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
object_function, "keys", Builtins::kObjectKeys, 1, true);
native_context()->set_object_keys(*object_keys);
SimpleInstallFunction(object_function, factory->entries_string(),
- Builtins::kObjectEntries, 1, false);
+ Builtins::kObjectEntries, 1, true);
SimpleInstallFunction(object_function, factory->values_string(),
- Builtins::kObjectValues, 1, false);
+ Builtins::kObjectValues, 1, true);
SimpleInstallFunction(isolate->initial_object_prototype(),
"__defineGetter__", Builtins::kObjectDefineGetter, 2,
@@ -1611,50 +1605,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
native_context()->set_async_iterator_value_unwrap_shared_fun(*info);
}
- { // --- A s y n c G e n e r a t o r ---
- Handle<JSFunction> await_caught =
- SimpleCreateFunction(isolate, factory->empty_string(),
- Builtins::kAsyncGeneratorAwaitCaught, 1, false);
- native_context()->set_async_generator_await_caught(*await_caught);
-
- Handle<JSFunction> await_uncaught =
- SimpleCreateFunction(isolate, factory->empty_string(),
- Builtins::kAsyncGeneratorAwaitUncaught, 1, false);
- native_context()->set_async_generator_await_uncaught(*await_uncaught);
-
- Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kAsyncGeneratorAwaitResolveClosure,
- factory->empty_string(), 1);
- native_context()->set_async_generator_await_resolve_shared_fun(*info);
-
- info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kAsyncGeneratorAwaitRejectClosure,
- factory->empty_string(), 1);
- native_context()->set_async_generator_await_reject_shared_fun(*info);
-
- info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kAsyncGeneratorYieldResolveClosure,
- factory->empty_string(), 1);
- native_context()->set_async_generator_yield_resolve_shared_fun(*info);
-
- info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kAsyncGeneratorReturnResolveClosure,
- factory->empty_string(), 1);
- native_context()->set_async_generator_return_resolve_shared_fun(*info);
-
- info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kAsyncGeneratorReturnClosedResolveClosure,
- factory->empty_string(), 1);
- native_context()->set_async_generator_return_closed_resolve_shared_fun(
- *info);
-
- info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kAsyncGeneratorReturnClosedRejectClosure,
- factory->empty_string(), 1);
- native_context()->set_async_generator_return_closed_reject_shared_fun(
- *info);
- }
-
{ // --- A r r a y ---
Handle<JSFunction> array_function = InstallFunction(
global, "Array", JS_ARRAY_TYPE, JSArray::kSize, 0,
@@ -1705,6 +1655,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
array_function, "isArray", Builtins::kArrayIsArray, 1, true);
native_context()->set_is_arraylike(*is_arraylike);
+ SimpleInstallFunction(array_function, "from", Builtins::kArrayFrom, 1,
+ false);
+ SimpleInstallFunction(array_function, "of", Builtins::kArrayOf, 0, false);
+
JSObject::AddProperty(proto, factory->constructor_string(), array_function,
DONT_ENUM);
@@ -1768,8 +1722,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
JS_FAST_ARRAY_VALUE_ITERATOR_TYPE, JSArrayIterator::kSize, 0,
array_iterator_prototype, Builtins::kIllegal);
array_iterator_function->shared()->set_native(false);
- array_iterator_function->shared()->set_instance_class_name(
- isolate->heap()->ArrayIterator_string());
native_context()->set_initial_array_iterator_prototype(
*array_iterator_prototype);
@@ -1791,6 +1743,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
V(FLOAT32_ARRAY, KEY_VALUE, float32_array, key_value) \
V(FLOAT64_ARRAY, KEY_VALUE, float64_array, key_value) \
V(UINT8_CLAMPED_ARRAY, KEY_VALUE, uint8_clamped_array, key_value) \
+ V(BIGUINT64_ARRAY, KEY_VALUE, biguint64_array, key_value) \
+ V(BIGINT64_ARRAY, KEY_VALUE, bigint64_array, key_value) \
V(FAST_SMI_ARRAY, KEY_VALUE, fast_smi_array, key_value) \
V(FAST_HOLEY_SMI_ARRAY, KEY_VALUE, fast_holey_smi_array, key_value) \
V(FAST_ARRAY, KEY_VALUE, fast_array, key_value) \
@@ -1807,6 +1761,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
V(FLOAT32_ARRAY, VALUE, float32_array, value) \
V(FLOAT64_ARRAY, VALUE, float64_array, value) \
V(UINT8_CLAMPED_ARRAY, VALUE, uint8_clamped_array, value) \
+ V(BIGUINT64_ARRAY, VALUE, biguint64_array, value) \
+ V(BIGINT64_ARRAY, VALUE, bigint64_array, value) \
V(FAST_SMI_ARRAY, VALUE, fast_smi_array, value) \
V(FAST_HOLEY_SMI_ARRAY, VALUE, fast_holey_smi_array, value) \
V(FAST_ARRAY, VALUE, fast_array, value) \
@@ -2102,9 +2058,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(prototype, "trim", Builtins::kStringPrototypeTrim, 0,
false);
SimpleInstallFunction(prototype, "trimLeft",
- Builtins::kStringPrototypeTrimLeft, 0, false);
+ Builtins::kStringPrototypeTrimStart, 0, false);
SimpleInstallFunction(prototype, "trimRight",
- Builtins::kStringPrototypeTrimRight, 0, false);
+ Builtins::kStringPrototypeTrimEnd, 0, false);
#ifdef V8_INTL_SUPPORT
SimpleInstallFunction(prototype, "toLowerCase",
Builtins::kStringPrototypeToLowerCaseIntl, 0, true);
@@ -2355,16 +2311,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{
- Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
+ Handle<SharedFunctionInfo> info = SimpleCreateConstructorSharedFunctionInfo(
isolate, Builtins::kPromiseGetCapabilitiesExecutor,
- factory->empty_string(), factory->Object_string(), 2);
+ factory->empty_string(), 2);
native_context()->set_promise_get_capabilities_executor_shared_fun(*info);
-
- // %new_promise_capability(C, debugEvent)
- Handle<JSFunction> new_promise_capability =
- SimpleCreateFunction(isolate, factory->empty_string(),
- Builtins::kNewPromiseCapability, 2, false);
- native_context()->set_new_promise_capability(*new_promise_capability);
}
{ // -- P r o m i s e
@@ -2376,7 +2326,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<SharedFunctionInfo> shared(promise_fun->shared(), isolate);
shared->SetConstructStub(*BUILTIN_CODE(isolate, JSBuiltinsConstructStub));
- shared->set_instance_class_name(isolate->heap()->Object_string());
shared->set_internal_formal_parameter_count(1);
shared->set_length(1);
@@ -2387,7 +2336,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(promise_fun, "race", Builtins::kPromiseRace, 1, true);
SimpleInstallFunction(promise_fun, "resolve",
- Builtins::kPromiseResolveWrapper, 1, true);
+ Builtins::kPromiseResolveTrampoline, 1, true);
SimpleInstallFunction(promise_fun, "reject", Builtins::kPromiseReject, 1,
true);
@@ -2395,6 +2344,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Setup %PromisePrototype%.
Handle<JSObject> prototype(
JSObject::cast(promise_fun->instance_prototype()));
+ native_context()->set_promise_prototype(*prototype);
// Install the @@toStringTag property on the {prototype}.
JSObject::AddProperty(
@@ -2423,64 +2373,25 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<Map> prototype_map(prototype->map());
Map::SetShouldBeFastPrototypeMap(prototype_map, true, isolate);
- // Store the initial Promise.prototype map. This is used in fast-path
- // checks. Do not alter the prototype after this point.
- native_context()->set_promise_prototype_map(*prototype_map);
-
- { // Internal: PromiseInternalConstructor
- // Also exposed as extrasUtils.createPromise.
- Handle<JSFunction> function =
- SimpleCreateFunction(isolate, factory->empty_string(),
- Builtins::kPromiseInternalConstructor, 1, true);
- function->shared()->set_native(false);
- native_context()->set_promise_internal_constructor(*function);
- }
-
{ // Internal: IsPromise
Handle<JSFunction> function = SimpleCreateFunction(
isolate, factory->empty_string(), Builtins::kIsPromise, 1, false);
native_context()->set_is_promise(*function);
}
- { // Internal: ResolvePromise
- // Also exposed as extrasUtils.resolvePromise.
- Handle<JSFunction> function = SimpleCreateFunction(
- isolate, factory->empty_string(), Builtins::kResolvePromise, 2, true);
- function->shared()->set_native(false);
- native_context()->set_promise_resolve(*function);
- }
-
- { // Internal: PromiseHandle
- Handle<JSFunction> function =
- SimpleCreateFunction(isolate, factory->empty_string(),
- Builtins::kPromiseHandleJS, 5, false);
- native_context()->set_promise_handle(*function);
- }
-
- { // Internal: PromiseHandleReject
- Handle<JSFunction> function =
- SimpleCreateFunction(isolate, factory->empty_string(),
- Builtins::kPromiseHandleReject, 3, false);
- native_context()->set_promise_handle_reject(*function);
- }
-
- { // Internal: InternalPromiseReject
- Handle<JSFunction> function =
- SimpleCreateFunction(isolate, factory->empty_string(),
- Builtins::kInternalPromiseReject, 3, true);
- function->shared()->set_native(false);
- native_context()->set_promise_internal_reject(*function);
- }
-
{
Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kPromiseResolveClosure, factory->empty_string(),
- 1);
- native_context()->set_promise_resolve_shared_fun(*info);
+ isolate, Builtins::kPromiseCapabilityDefaultResolve,
+ factory->empty_string(), 1);
+ info->set_native(true);
+ native_context()->set_promise_capability_default_resolve_shared_fun(
+ *info);
info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kPromiseRejectClosure, factory->empty_string(), 1);
- native_context()->set_promise_reject_shared_fun(*info);
+ isolate, Builtins::kPromiseCapabilityDefaultReject,
+ factory->empty_string(), 1);
+ info->set_native(true);
+ native_context()->set_promise_capability_default_reject_shared_fun(*info);
}
{
@@ -2512,7 +2423,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<SharedFunctionInfo> shared(regexp_fun->shared(), isolate);
shared->SetConstructStub(*BUILTIN_CODE(isolate, JSBuiltinsConstructStub));
- shared->set_instance_class_name(isolate->heap()->RegExp_string());
shared->set_internal_formal_parameter_count(2);
shared->set_length(2);
@@ -2962,7 +2872,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
#endif // V8_INTL_SUPPORT
{ // -- A r r a y B u f f e r
- Handle<String> name = factory->InternalizeUtf8String("ArrayBuffer");
+ Handle<String> name = factory->ArrayBuffer_string();
Handle<JSFunction> array_buffer_fun = CreateArrayBuffer(name, ARRAY_BUFFER);
JSObject::AddProperty(global, name, array_buffer_fun, DONT_ENUM);
InstallWithIntrinsicDefaultProto(isolate, array_buffer_fun,
@@ -2978,7 +2888,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // -- S h a r e d A r r a y B u f f e r
- Handle<String> name = factory->InternalizeUtf8String("SharedArrayBuffer");
+ Handle<String> name = factory->SharedArrayBuffer_string();
Handle<JSFunction> shared_array_buffer_fun =
CreateArrayBuffer(name, SHARED_ARRAY_BUFFER);
InstallWithIntrinsicDefaultProto(isolate, shared_array_buffer_fun,
@@ -3025,6 +2935,11 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
InstallSpeciesGetter(typed_array_fun);
native_context()->set_typed_array_function(*typed_array_fun);
+ SimpleInstallFunction(typed_array_fun, "of", Builtins::kTypedArrayOf, 0,
+ false);
+ SimpleInstallFunction(typed_array_fun, "from", Builtins::kTypedArrayFrom, 1,
+ false);
+
// Setup %TypedArrayPrototype%.
Handle<JSObject> prototype(
JSObject::cast(typed_array_fun->instance_prototype()));
@@ -3068,6 +2983,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kTypedArrayPrototypeEvery, 1, false);
SimpleInstallFunction(prototype, "fill",
Builtins::kTypedArrayPrototypeFill, 1, false);
+ SimpleInstallFunction(prototype, "filter",
+ Builtins::kTypedArrayPrototypeFilter, 1, false);
SimpleInstallFunction(prototype, "find", Builtins::kTypedArrayPrototypeFind,
1, false);
SimpleInstallFunction(prototype, "findIndex",
@@ -3094,6 +3011,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kTypedArrayPrototypeSlice, 2, false);
SimpleInstallFunction(prototype, "some", Builtins::kTypedArrayPrototypeSome,
1, false);
+ SimpleInstallFunction(prototype, "subarray",
+ Builtins::kTypedArrayPrototypeSubArray, 2, false);
}
{ // -- T y p e d A r r a y s
@@ -3106,29 +3025,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
TYPED_ARRAYS(INSTALL_TYPED_ARRAY)
#undef INSTALL_TYPED_ARRAY
-
- // %typed_array_construct_by_length
- Handle<JSFunction> construct_by_length = SimpleCreateFunction(
- isolate,
- factory->NewStringFromAsciiChecked("typedArrayConstructByLength"),
- Builtins::kTypedArrayConstructByLength, 3, false);
- native_context()->set_typed_array_construct_by_length(*construct_by_length);
-
- // %typed_array_construct_by_array_buffer
- Handle<JSFunction> construct_by_buffer = SimpleCreateFunction(
- isolate,
- factory->NewStringFromAsciiChecked("typedArrayConstructByArrayBuffer"),
- Builtins::kTypedArrayConstructByArrayBuffer, 5, false);
- native_context()->set_typed_array_construct_by_array_buffer(
- *construct_by_buffer);
-
- // %typed_array_construct_by_array_like
- Handle<JSFunction> construct_by_array_like = SimpleCreateFunction(
- isolate,
- factory->NewStringFromAsciiChecked("typedArrayConstructByArrayLike"),
- Builtins::kTypedArrayConstructByArrayLike, 4, false);
- native_context()->set_typed_array_construct_by_array_like(
- *construct_by_array_like);
}
{ // -- D a t a V i e w
@@ -3219,7 +3115,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<SharedFunctionInfo> shared(js_map_fun->shared(), isolate);
shared->SetConstructStub(*BUILTIN_CODE(isolate, JSBuiltinsConstructStub));
- shared->set_instance_class_name(isolate->heap()->Map_string());
shared->DontAdaptArguments();
shared->set_length(0);
@@ -3263,6 +3158,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
BuiltinFunctionId::kMapSize);
SimpleInstallFunction(prototype, "values", Builtins::kMapPrototypeValues, 0,
true);
+
+ native_context()->set_initial_map_prototype_map(prototype->map());
+
InstallSpeciesGetter(js_map_fun);
}
@@ -3275,7 +3173,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<SharedFunctionInfo> shared(js_set_fun->shared(), isolate);
shared->SetConstructStub(*BUILTIN_CODE(isolate, JSBuiltinsConstructStub));
- shared->set_instance_class_name(isolate->heap()->Set_string());
shared->DontAdaptArguments();
shared->set_length(0);
@@ -3314,6 +3211,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
JSObject::AddProperty(prototype, factory->keys_string(), values, DONT_ENUM);
JSObject::AddProperty(prototype, factory->iterator_symbol(), values,
DONT_ENUM);
+
+ native_context()->set_initial_set_prototype_map(prototype->map());
+
InstallSpeciesGetter(js_set_fun);
}
@@ -3369,7 +3269,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<SharedFunctionInfo> shared(cons->shared(), isolate);
shared->SetConstructStub(*BUILTIN_CODE(isolate, JSBuiltinsConstructStub));
- shared->set_instance_class_name(isolate->heap()->WeakMap_string());
shared->DontAdaptArguments();
shared->set_length(0);
@@ -3380,13 +3279,16 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kWeakMapPrototypeDelete, 1, true);
SimpleInstallFunction(prototype, "get", Builtins::kWeakMapGet, 1, true);
SimpleInstallFunction(prototype, "has", Builtins::kWeakMapHas, 1, true);
- SimpleInstallFunction(prototype, "set", Builtins::kWeakMapPrototypeSet, 2,
- true);
+ Handle<JSFunction> weakmap_set = SimpleInstallFunction(
+ prototype, "set", Builtins::kWeakMapPrototypeSet, 2, true);
+ native_context()->set_weakmap_set(*weakmap_set);
JSObject::AddProperty(
prototype, factory->to_string_tag_symbol(),
factory->NewStringFromAsciiChecked("WeakMap"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ native_context()->set_initial_weakmap_prototype_map(prototype->map());
}
{ // -- W e a k S e t
@@ -3398,7 +3300,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<SharedFunctionInfo> shared(cons->shared(), isolate);
shared->SetConstructStub(*BUILTIN_CODE(isolate, JSBuiltinsConstructStub));
- shared->set_instance_class_name(isolate->heap()->WeakSet_string());
shared->DontAdaptArguments();
shared->set_length(0);
@@ -3408,13 +3309,16 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(prototype, "delete",
Builtins::kWeakSetPrototypeDelete, 1, true);
SimpleInstallFunction(prototype, "has", Builtins::kWeakSetHas, 1, true);
- SimpleInstallFunction(prototype, "add", Builtins::kWeakSetPrototypeAdd, 1,
- true);
+ Handle<JSFunction> weakset_add = SimpleInstallFunction(
+ prototype, "add", Builtins::kWeakSetPrototypeAdd, 1, true);
+ native_context()->set_weakset_add(*weakset_add);
JSObject::AddProperty(
prototype, factory->to_string_tag_symbol(),
factory->NewStringFromAsciiChecked("WeakSet"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ native_context()->set_initial_weakset_prototype_map(prototype->map());
}
{ // -- P r o x y
@@ -3541,16 +3445,12 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // --- sloppy arguments map
- // Make sure we can recognize argument objects at runtime.
- // This is done by introducing an anonymous function with
- // class_name equals 'Arguments'.
Handle<String> arguments_string = factory->Arguments_string();
NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithPrototype(
arguments_string, BUILTIN_CODE(isolate, Illegal),
isolate->initial_object_prototype(), JS_ARGUMENTS_TYPE,
JSSloppyArgumentsObject::kSize, 2, Builtins::kIllegal, MUTABLE);
Handle<JSFunction> function = factory->NewFunction(args);
- function->shared()->set_instance_class_name(*arguments_string);
Handle<Map> map(function->initial_map());
// Create the descriptor array for the arguments object.
@@ -3641,8 +3541,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
CreateFunction(isolate, factory->empty_string(),
JS_CONTEXT_EXTENSION_OBJECT_TYPE, JSObject::kHeaderSize,
0, factory->the_hole_value(), Builtins::kIllegal);
- Handle<String> name = factory->InternalizeUtf8String("context_extension");
- context_extension_fun->shared()->set_instance_class_name(*name);
native_context()->set_context_extension_function(*context_extension_fun);
}
@@ -3674,9 +3572,14 @@ Handle<JSFunction> Genesis::InstallTypedArray(const char* name,
Handle<JSFunction> result = InstallFunction(
global, name, JS_TYPED_ARRAY_TYPE, JSTypedArray::kSizeWithEmbedderFields,
- 0, factory()->the_hole_value(), Builtins::kIllegal);
+ 0, factory()->the_hole_value(), Builtins::kTypedArrayConstructor);
result->initial_map()->set_elements_kind(elements_kind);
+ result->shared()->DontAdaptArguments();
+ result->shared()->set_length(3);
+ result->shared()->SetConstructStub(
+ *BUILTIN_CODE(isolate_, TypedArrayConstructor_ConstructStub));
+
CHECK(JSObject::SetPrototype(result, typed_array_function, false, kDontThrow)
.FromJust());
@@ -3757,25 +3660,15 @@ bool Bootstrapper::CompileNative(Isolate* isolate, Vector<const char> name,
Handle<Object> argv[],
NativesFlag natives_flag) {
SuppressDebug compiling_natives(isolate->debug());
- // During genesis, the boilerplate for stack overflow won't work until the
- // environment has been at least partially initialized. Add a stack check
- // before entering JS code to catch overflow early.
- StackLimitCheck check(isolate);
- if (check.JsHasOverflowed(kStackSpaceRequiredForCompilation * KB)) {
- isolate->StackOverflow();
- return false;
- }
Handle<Context> context(isolate->context());
-
Handle<String> script_name =
isolate->factory()->NewStringFromUtf8(name).ToHandleChecked();
MaybeHandle<SharedFunctionInfo> maybe_function_info =
Compiler::GetSharedFunctionInfoForScript(
- source, script_name, 0, 0, ScriptOriginOptions(),
- MaybeHandle<Object>(), context, nullptr, nullptr,
- ScriptCompiler::kNoCompileOptions, ScriptCompiler::kNoCacheNoReason,
- natives_flag, MaybeHandle<FixedArray>());
+ source, Compiler::ScriptDetails(script_name), ScriptOriginOptions(),
+ nullptr, nullptr, ScriptCompiler::kNoCompileOptions,
+ ScriptCompiler::kNoCacheNoReason, natives_flag);
Handle<SharedFunctionInfo> function_info;
if (!maybe_function_info.ToHandle(&function_info)) return false;
@@ -3838,11 +3731,9 @@ bool Genesis::CompileExtension(Isolate* isolate, v8::Extension* extension) {
factory->NewStringFromUtf8(name).ToHandleChecked();
MaybeHandle<SharedFunctionInfo> maybe_function_info =
Compiler::GetSharedFunctionInfoForScript(
- source, script_name, 0, 0, ScriptOriginOptions(),
- MaybeHandle<Object>(), context, extension, nullptr,
- ScriptCompiler::kNoCompileOptions,
- ScriptCompiler::kNoCacheBecauseV8Extension, EXTENSION_CODE,
- MaybeHandle<FixedArray>());
+ source, Compiler::ScriptDetails(script_name), ScriptOriginOptions(),
+ extension, nullptr, ScriptCompiler::kNoCompileOptions,
+ ScriptCompiler::kNoCacheBecauseV8Extension, EXTENSION_CODE);
if (!maybe_function_info.ToHandle(&function_info)) return false;
cache->Add(name, function_info);
}
@@ -4012,7 +3903,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
}
{ // -- S e t I t e r a t o r
- Handle<String> name = factory->InternalizeUtf8String("Set Iterator");
+ Handle<String> name = factory->SetIterator_string();
// Setup %SetIteratorPrototype%.
Handle<JSObject> prototype =
@@ -4034,7 +3925,6 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
InstallFunction(container, "SetIterator", JS_SET_VALUE_ITERATOR_TYPE,
JSSetIterator::kSize, 0, prototype, Builtins::kIllegal);
set_iterator_function->shared()->set_native(false);
- set_iterator_function->shared()->set_instance_class_name(*name);
Handle<Map> set_value_iterator_map(set_iterator_function->initial_map(),
isolate);
@@ -4048,7 +3938,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
}
{ // -- M a p I t e r a t o r
- Handle<String> name = factory->InternalizeUtf8String("Map Iterator");
+ Handle<String> name = factory->MapIterator_string();
// Setup %MapIteratorPrototype%.
Handle<JSObject> prototype =
@@ -4070,7 +3960,6 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
InstallFunction(container, "MapIterator", JS_MAP_KEY_ITERATOR_TYPE,
JSMapIterator::kSize, 0, prototype, Builtins::kIllegal);
map_iterator_function->shared()->set_native(false);
- map_iterator_function->shared()->set_instance_class_name(*name);
Handle<Map> map_key_iterator_map(map_iterator_function->initial_map(),
isolate);
@@ -4089,11 +3978,10 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
}
{ // -- S c r i p t
- Handle<String> name = factory->InternalizeUtf8String("Script");
+ Handle<String> name = factory->Script_string();
Handle<JSFunction> script_fun = InstallFunction(
container, name, JS_VALUE_TYPE, JSValue::kSize, 0,
factory->the_hole_value(), Builtins::kUnsupportedThrower, DONT_ENUM);
- script_fun->shared()->set_instance_class_name(*name);
native_context->set_script_function(*script_fun);
Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
@@ -4226,34 +4114,6 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
{
Handle<JSFunction> function =
SimpleCreateFunction(isolate, factory->empty_string(),
- Builtins::kAsyncFunctionAwaitCaught, 2, false);
- native_context->set_async_function_await_caught(*function);
- }
-
- {
- Handle<JSFunction> function =
- SimpleCreateFunction(isolate, factory->empty_string(),
- Builtins::kAsyncFunctionAwaitUncaught, 2, false);
- native_context->set_async_function_await_uncaught(*function);
- }
-
- {
- Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kAsyncFunctionAwaitRejectClosure,
- factory->empty_string(), 1);
- native_context->set_async_function_await_reject_shared_fun(*info);
- }
-
- {
- Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kAsyncFunctionAwaitResolveClosure,
- factory->empty_string(), 1);
- native_context->set_async_function_await_resolve_shared_fun(*info);
- }
-
- {
- Handle<JSFunction> function =
- SimpleCreateFunction(isolate, factory->empty_string(),
Builtins::kAsyncFunctionPromiseCreate, 0, false);
native_context->set_async_function_promise_create(*function);
}
@@ -4344,7 +4204,6 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_do_expressions)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_named_captures)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_property)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_sent)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_tostring)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_public_fields)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_private_fields)
@@ -4354,6 +4213,7 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_dynamic_import)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_import_meta)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_restrict_constructor_return)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_optional_catch_binding)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_subsume_json)
void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
const char* name, Handle<Symbol> value) {
@@ -4390,6 +4250,41 @@ void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
}
}
+void Genesis::InitializeGlobal_harmony_string_trimming() {
+ if (!FLAG_harmony_string_trimming) return;
+
+ Handle<JSGlobalObject> global(native_context()->global_object());
+ Isolate* isolate = global->GetIsolate();
+ Factory* factory = isolate->factory();
+
+ Handle<JSObject> string_prototype(
+ native_context()->initial_string_prototype());
+
+ {
+ Handle<String> trim_left_name = factory->InternalizeUtf8String("trimLeft");
+ Handle<String> trim_start_name =
+ factory->InternalizeUtf8String("trimStart");
+ Handle<JSFunction> trim_left_fun = Handle<JSFunction>::cast(
+ JSObject::GetProperty(string_prototype, trim_left_name)
+ .ToHandleChecked());
+ JSObject::AddProperty(string_prototype, trim_start_name, trim_left_fun,
+ DONT_ENUM);
+ trim_left_fun->shared()->set_name(*trim_start_name);
+ }
+
+ {
+ Handle<String> trim_right_name =
+ factory->InternalizeUtf8String("trimRight");
+ Handle<String> trim_end_name = factory->InternalizeUtf8String("trimEnd");
+ Handle<JSFunction> trim_right_fun = Handle<JSFunction>::cast(
+ JSObject::GetProperty(string_prototype, trim_right_name)
+ .ToHandleChecked());
+ JSObject::AddProperty(string_prototype, trim_end_name, trim_right_fun,
+ DONT_ENUM);
+ trim_right_fun->shared()->set_name(*trim_end_name);
+ }
+}
+
void Genesis::InitializeGlobal_harmony_array_prototype_values() {
if (!FLAG_harmony_array_prototype_values) return;
Handle<JSFunction> array_constructor(native_context()->array_function());
@@ -4423,7 +4318,6 @@ void Genesis::InitializeGlobal_harmony_promise_finally() {
// to prototype, so we update the saved map.
Handle<Map> prototype_map(prototype->map());
Map::SetShouldBeFastPrototypeMap(prototype_map, true, isolate());
- native_context()->set_promise_prototype_map(*prototype_map);
{
Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
@@ -4456,10 +4350,19 @@ void Genesis::InitializeGlobal_harmony_promise_finally() {
}
void Genesis::InitializeGlobal_harmony_bigint() {
- if (!FLAG_harmony_bigint) return;
-
Factory* factory = isolate()->factory();
Handle<JSGlobalObject> global(native_context()->global_object());
+ if (!FLAG_harmony_bigint) {
+ // Typed arrays are installed by default; remove them if the flag is off.
+ CHECK(JSObject::DeleteProperty(
+ global, factory->InternalizeUtf8String("BigInt64Array"))
+ .ToChecked());
+ CHECK(JSObject::DeleteProperty(
+ global, factory->InternalizeUtf8String("BigUint64Array"))
+ .ToChecked());
+ return;
+ }
+
Handle<JSFunction> bigint_fun =
InstallFunction(global, "BigInt", JS_VALUE_TYPE, JSValue::kSize, 0,
factory->the_hole_value(), Builtins::kBigIntConstructor);
@@ -4474,9 +4377,6 @@ void Genesis::InitializeGlobal_harmony_bigint() {
Context::BIGINT_FUNCTION_INDEX);
// Install the properties of the BigInt constructor.
- // parseInt(string, radix)
- SimpleInstallFunction(bigint_fun, "parseInt", Builtins::kBigIntParseInt, 2,
- false);
// asUintN(bits, bigint)
SimpleInstallFunction(bigint_fun, "asUintN", Builtins::kBigIntAsUintN, 2,
false);
@@ -4503,6 +4403,20 @@ void Genesis::InitializeGlobal_harmony_bigint() {
JSObject::AddProperty(prototype, factory->to_string_tag_symbol(),
factory->BigInt_string(),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ // Install 64-bit DataView accessors.
+ // TODO(jkummerow): Move these to the "DataView" section when dropping the
+ // FLAG_harmony_bigint.
+ Handle<JSObject> dataview_prototype(
+ JSObject::cast(native_context()->data_view_fun()->instance_prototype()));
+ SimpleInstallFunction(dataview_prototype, "getBigInt64",
+ Builtins::kDataViewPrototypeGetBigInt64, 1, false);
+ SimpleInstallFunction(dataview_prototype, "setBigInt64",
+ Builtins::kDataViewPrototypeSetBigInt64, 2, false);
+ SimpleInstallFunction(dataview_prototype, "getBigUint64",
+ Builtins::kDataViewPrototypeGetBigUint64, 1, false);
+ SimpleInstallFunction(dataview_prototype, "setBigUint64",
+ Builtins::kDataViewPrototypeSetBigUint64, 2, false);
}
#ifdef V8_INTL_SUPPORT
@@ -4554,7 +4468,6 @@ Handle<JSFunction> Genesis::CreateArrayBuffer(
array_buffer_fun->shared()->SetConstructStub(*code);
array_buffer_fun->shared()->DontAdaptArguments();
array_buffer_fun->shared()->set_length(1);
- array_buffer_fun->shared()->set_instance_class_name(*name);
// Install the "constructor" property on the {prototype}.
JSObject::AddProperty(prototype, factory()->constructor_string(),
@@ -4650,10 +4563,30 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
InstallInternalArray(extras_utils, "InternalPackedArray", PACKED_ELEMENTS);
- InstallFunction(extras_utils, isolate()->promise_internal_constructor(),
+ // v8.createPromise(parent)
+ Handle<JSFunction> promise_internal_constructor =
+ SimpleCreateFunction(isolate(), factory()->empty_string(),
+ Builtins::kPromiseInternalConstructor, 1, true);
+ promise_internal_constructor->shared()->set_native(false);
+ InstallFunction(extras_utils, promise_internal_constructor,
factory()->NewStringFromAsciiChecked("createPromise"));
- InstallFunction(extras_utils, isolate()->promise_resolve(),
+
+ // v8.rejectPromise(promise, reason)
+ Handle<JSFunction> promise_internal_reject =
+ SimpleCreateFunction(isolate(), factory()->empty_string(),
+ Builtins::kPromiseInternalReject, 2, true);
+ promise_internal_reject->shared()->set_native(false);
+ InstallFunction(extras_utils, promise_internal_reject,
+ factory()->NewStringFromAsciiChecked("rejectPromise"));
+
+ // v8.resolvePromise(promise, resolution)
+ Handle<JSFunction> promise_internal_resolve =
+ SimpleCreateFunction(isolate(), factory()->empty_string(),
+ Builtins::kPromiseInternalResolve, 2, true);
+ promise_internal_resolve->shared()->set_native(false);
+ InstallFunction(extras_utils, promise_internal_resolve,
factory()->NewStringFromAsciiChecked("resolvePromise"));
+
InstallFunction(extras_utils, isolate()->is_promise(),
factory()->NewStringFromAsciiChecked("isPromise"));
@@ -4699,8 +4632,8 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
native_context()->set_fast_template_instantiations_cache(
*fast_template_instantiations_cache);
- auto slow_template_instantiations_cache =
- NumberDictionary::New(isolate(), ApiNatives::kInitialFunctionCacheSize);
+ auto slow_template_instantiations_cache = SimpleNumberDictionary::New(
+ isolate(), ApiNatives::kInitialFunctionCacheSize);
native_context()->set_slow_template_instantiations_cache(
*slow_template_instantiations_cache);
@@ -5272,6 +5205,11 @@ bool Genesis::ConfigureGlobalObjects(
native_context()->set_js_map_map(js_map_fun->initial_map());
native_context()->set_js_set_map(js_set_fun->initial_map());
+ Handle<JSFunction> js_array_constructor(native_context()->array_function());
+ Handle<JSObject> js_array_prototype(
+ JSObject::cast(js_array_constructor->instance_prototype()));
+ native_context()->set_initial_array_prototype_map(js_array_prototype->map());
+
return true;
}
@@ -5434,15 +5372,6 @@ Genesis::Genesis(
// on all function exits.
SaveContext saved_context(isolate);
- // During genesis, the boilerplate for stack overflow won't work until the
- // environment has been at least partially initialized. Add a stack check
- // before entering JS code to catch overflow early.
- StackLimitCheck check(isolate);
- if (check.HasOverflowed()) {
- isolate->StackOverflow();
- return;
- }
-
// The deserializer needs to hook up references to the global proxy.
// Create an uninitialized global proxy now if we don't have one
// and initialize it later in CreateNewGlobals.
@@ -5571,15 +5500,6 @@ Genesis::Genesis(Isolate* isolate,
// on all function exits.
SaveContext saved_context(isolate);
- // During genesis, the boilerplate for stack overflow won't work until the
- // environment has been at least partially initialized. Add a stack check
- // before entering JS code to catch overflow early.
- StackLimitCheck check(isolate);
- if (check.HasOverflowed()) {
- isolate->StackOverflow();
- return;
- }
-
const int proxy_size = JSGlobalProxy::SizeWithEmbedderFields(
global_proxy_template->InternalFieldCount());
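Editorial aside (not part of the patch): the bootstrapper.cc changes above install several JS-visible builtins: TypedArray.of/from, TypedArray.prototype.filter and subarray, the trimStart/trimEnd aliases behind --harmony_string_trimming, and the 64-bit DataView accessors behind --harmony_bigint. A hedged TypeScript sketch of the observable behavior, assuming an ES2020+ target and an engine with those flags enabled:

// trimStart/trimEnd are installed as the same function objects as trimLeft/trimRight.
console.log(String.prototype.trimStart === String.prototype.trimLeft);  // true
console.log(String.prototype.trimEnd === String.prototype.trimRight);   // true

// %TypedArray% gains of/from plus prototype filter/subarray.
const ta = Int32Array.of(1, 2, 3, 4);
console.log(ta.filter(x => x % 2 === 0));  // Int32Array [2, 4] (new backing store)
console.log(ta.subarray(1, 3));            // Int32Array [2, 3] (shares ta's buffer)

// DataView gains BigInt64/BigUint64 accessors when --harmony_bigint is on.
const dv = new DataView(new ArrayBuffer(8));
dv.setBigInt64(0, -42n);
console.log(dv.getBigInt64(0));            // -42n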
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index 8afd0a0601..a554496dfd 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -25,7 +25,7 @@ class SourceCodeCache final BASE_EMBEDDED {
void Initialize(Isolate* isolate, bool create_heap_objects);
void Iterate(RootVisitor* v) {
- v->VisitRootPointer(Root::kExtensions,
+ v->VisitRootPointer(Root::kExtensions, nullptr,
bit_cast<Object**, FixedArray**>(&cache_));
}
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 2b2b9c2b34..1ea0bb733b 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -156,13 +156,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
- __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(r2);
-}
-
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
@@ -190,6 +183,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ pop(r0);
__ SmiUntag(r0, r0);
}
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
__ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(r2);
}
@@ -297,7 +291,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r4, Operand(SharedFunctionInfo::kDerivedConstructorMask));
+ __ tst(r4, Operand(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ b(ne, &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
@@ -417,7 +411,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ ldr(r4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ ldr(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r4, Operand(SharedFunctionInfo::kClassConstructorMask));
+ __ tst(r4, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
@@ -559,9 +553,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// undefined because generator functions are non-constructable.
__ Move(r3, r1);
__ Move(r1, r4);
- __ ldr(scratch, FieldMemOperand(r1, JSFunction::kCodeOffset));
- __ add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(scratch);
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
+ __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r2);
}
__ bind(&prepare_step_in_if_stepping);
@@ -828,9 +823,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
- __ add(optimized_code_entry, optimized_code_entry,
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+ __ add(r2, optimized_code_entry,
Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(optimized_code_entry);
+ __ Jump(r2);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
@@ -844,10 +840,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
// Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
- Register bytecode_offset, Register bytecode,
- Register scratch1) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Label* if_return) {
Register bytecode_size_table = scratch1;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
@@ -857,11 +856,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
- Label load_size, extra_wide;
+ Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
__ cmp(bytecode, Operand(0x1));
- __ b(hi, &load_size);
+ __ b(hi, &process_bytecode);
__ b(eq, &extra_wide);
// Load the next bytecode and update table to the wide scaled table.
@@ -869,7 +868,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ add(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ jmp(&load_size);
+ __ jmp(&process_bytecode);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
@@ -878,8 +877,16 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ add(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- // Load the size of the current bytecode.
- __ bind(&load_size);
+ __ bind(&process_bytecode);
+
+// Bail out to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ cmp(bytecode, Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
+ __ b(if_return, eq);
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // Otherwise, load the size of the current bytecode and advance the offset.
__ ldr(scratch1, MemOperand(bytecode_size_table, bytecode, LSL, 2));
__ add(bytecode_offset, bytecode_offset, scratch1);
}
@@ -907,7 +914,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the feedback vector from the closure.
__ ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
@@ -1008,11 +1015,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
- __ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
+ __ ldrb(r4, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ldr(r4, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
- kPointerSizeLog2));
- __ Call(r4);
+ __ ldr(
+ kJavaScriptCallCodeStartRegister,
+ MemOperand(kInterpreterDispatchTableRegister, r4, LSL, kPointerSizeLog2));
+ __ Call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
@@ -1025,16 +1033,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
- // Check if we should return.
+ // Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ cmp(r1, Operand(static_cast<int>(interpreter::Bytecode::kReturn)));
- __ b(&do_return, eq);
-
- // Advance to the next bytecode and dispatch.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r1, r2);
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, r1, r2,
+ &do_return);
__ jmp(&do_dispatch);
__ bind(&do_return);
@@ -1215,13 +1220,14 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
- __ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister));
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
- __ ldr(scratch, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
- kPointerSizeLog2));
- __ Jump(scratch);
+ __ ldrb(scratch, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ ldr(kJavaScriptCallCodeStartRegister,
+ MemOperand(kInterpreterDispatchTableRegister, scratch, LSL,
+ kPointerSizeLog2));
+ __ Jump(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1237,14 +1243,20 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister));
// Advance to the next bytecode.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r1, r2);
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, r1, r2,
+ &if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(r2, kInterpreterBytecodeOffsetRegister);
__ str(r2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1262,7 +1274,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// Get the feedback vector.
Register feedback_vector = r2;
__ ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
@@ -1275,7 +1287,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);
// Otherwise, tail call the SFI code.
- GenerateTailCallToSharedCode(masm);
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
+ __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r2);
}
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
@@ -1304,7 +1320,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Do we have a valid feedback vector?
__ ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
@@ -1491,9 +1507,10 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
}
// On failure, tail call back to regular js by re-calling the function
  // which has been reset to the compile lazy builtin.
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kCodeOffset));
- __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(r4);
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
+ __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r2);
}
namespace {
@@ -1978,7 +1995,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r3, Operand(SharedFunctionInfo::kClassConstructorMask));
+ __ tst(r3, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ b(ne, &class_constructor);
// Enter the context of the function; ToObject has to run in the function
@@ -2449,9 +2466,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r0 : expected number of arguments
// r1 : function (passed through to callee)
// r3 : new target (passed through to callee)
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kCodeOffset));
- __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(r4);
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
+ __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(r2);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2464,9 +2482,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
  // Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kCodeOffset));
- __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(r4);
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
+ __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r2);
__ bind(&stack_overflow);
{
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index dd92af89bb..54d2524d6e 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -150,13 +150,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset));
- __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
- __ Br(x2);
-}
-
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
@@ -180,6 +173,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ SmiUntag(x0);
}
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
__ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
__ Br(x2);
}
@@ -332,7 +326,8 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestAndBranchIfAnySet(w4, SharedFunctionInfo::kDerivedConstructorMask,
+ __ TestAndBranchIfAnySet(w4,
+ SharedFunctionInfo::IsDerivedConstructorBit::kMask,
&not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
@@ -460,11 +455,11 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
- __ TestAndBranchIfAllClear(w4, SharedFunctionInfo::kClassConstructorMask,
- &use_receiver);
+ __ TestAndBranchIfAllClear(
+ w4, SharedFunctionInfo::IsClassConstructorBit::kMask, &use_receiver);
} else {
- __ TestAndBranchIfAnySet(w4, SharedFunctionInfo::kClassConstructorMask,
- &use_receiver);
+ __ TestAndBranchIfAnySet(
+ w4, SharedFunctionInfo::IsClassConstructorBit::kMask, &use_receiver);
__ CallRuntime(
Runtime::kIncrementUseCounterConstructorReturnNonUndefinedPrimitive);
__ B(&use_receiver);
@@ -552,7 +547,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- __ CompareRoot(__ StackPointer(), Heap::kRealStackLimitRootIndex);
+ __ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
__ B(lo, &stack_overflow);
// Get number of arguments for generator function.
@@ -617,9 +612,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// undefined because generator functions are non-constructable.
__ Move(x3, x1);
__ Move(x1, x4);
- __ Ldr(x5, FieldMemOperand(x1, JSFunction::kCodeOffset));
- __ Add(x5, x5, Code::kHeaderSize - kHeapObjectTag);
- __ Jump(x5);
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+ __ Ldr(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
+ __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(x2);
}
__ Bind(&prepare_step_in_if_stepping);
@@ -663,7 +659,7 @@ static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
__ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
// Make scratch the space we have left. The stack might already be overflowed
// here which will cause scratch to become negative.
- __ Sub(scratch, masm->StackPointer(), scratch);
+ __ Sub(scratch, sp, scratch);
// Check if the arguments will overflow the stack.
__ Cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
__ B(le, stack_overflow);
@@ -745,7 +741,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Poke the result into the stack.
__ Str(x11, MemOperand(scratch, -kPointerSize, PreIndex));
// Loop if we've not reached the end of copy marker.
- __ Cmp(__ StackPointer(), scratch);
+ __ Cmp(sp, scratch);
__ B(lt, &loop);
__ Bind(&done);
@@ -920,9 +916,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
- __ Add(optimized_code_entry, optimized_code_entry,
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+ __ Add(x2, optimized_code_entry,
Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(optimized_code_entry);
+ __ Jump(x2);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
@@ -936,10 +933,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
// Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
- Register bytecode_offset, Register bytecode,
- Register scratch1) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Label* if_return) {
Register bytecode_size_table = scratch1;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
@@ -949,11 +949,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
- Label load_size, extra_wide;
+ Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
__ Cmp(bytecode, Operand(0x1));
- __ B(hi, &load_size);
+ __ B(hi, &process_bytecode);
__ B(eq, &extra_wide);
// Load the next bytecode and update table to the wide scaled table.
@@ -961,7 +961,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ Ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ Add(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ B(&load_size);
+ __ B(&process_bytecode);
__ Bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
@@ -970,8 +970,16 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ Add(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- // Load the size of the current bytecode.
- __ Bind(&load_size);
+ __ Bind(&process_bytecode);
+
+// Bail out to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ Cmp(x1, Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
+ __ B(if_return, eq);
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // Otherwise, load the size of the current bytecode and advance the offset.
__ Ldr(scratch1.W(), MemOperand(bytecode_size_table, bytecode, LSL, 2));
__ Add(bytecode_offset, bytecode_offset, scratch1);
}
@@ -998,7 +1006,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the feedback vector from the closure.
__ Ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
@@ -1009,7 +1017,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ Push(lr, fp, cp, closure);
- __ Add(fp, __ StackPointer(), StandardFrameConstants::kFixedFrameSizeFromFp);
+ __ Add(fp, sp, StandardFrameConstants::kFixedFrameSizeFromFp);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
@@ -1022,7 +1030,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Bind(&bytecode_array_loaded);
// Increment invocation count for the function.
- __ Ldr(x11, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ Ldr(x11, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ldr(x11, FieldMemOperand(x11, Cell::kValueOffset));
__ Ldr(w10, FieldMemOperand(x11, FeedbackVector::kInvocationCountOffset));
__ Add(w10, w10, Operand(1));
@@ -1060,7 +1068,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
Label ok;
- __ Sub(x10, __ StackPointer(), Operand(x11));
+ __ Sub(x10, sp, Operand(x11));
__ CompareRoot(x10, Heap::kRealStackLimitRootIndex);
__ B(hs, &ok);
__ CallRuntime(Runtime::kThrowStackOverflow);
@@ -1101,11 +1109,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
- __ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister));
- __ Mov(x1, Operand(x1, LSL, kPointerSizeLog2));
- __ Ldr(ip0, MemOperand(kInterpreterDispatchTableRegister, x1));
- __ Call(ip0);
+ __ Ldrb(x18, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ Mov(x1, Operand(x18, LSL, kPointerSizeLog2));
+ __ Ldr(kJavaScriptCallCodeStartRegister,
+ MemOperand(kInterpreterDispatchTableRegister, x1));
+ __ Call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
@@ -1118,16 +1127,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
- // Check if we should return.
+ // Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ Cmp(x1, Operand(static_cast<int>(interpreter::Bytecode::kReturn)));
- __ B(&do_return, eq);
-
- // Advance to the next bytecode and dispatch.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, x1, x2);
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, x1, x2,
+ &do_return);
__ B(&do_dispatch);
__ bind(&do_return);
@@ -1336,11 +1342,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
- __ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister));
- __ Mov(x1, Operand(x1, LSL, kPointerSizeLog2));
- __ Ldr(ip0, MemOperand(kInterpreterDispatchTableRegister, x1));
- __ Jump(ip0);
+ __ Ldrb(x18, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ Mov(x1, Operand(x18, LSL, kPointerSizeLog2));
+ __ Ldr(kJavaScriptCallCodeStartRegister,
+ MemOperand(kInterpreterDispatchTableRegister, x1));
+ __ Jump(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1356,14 +1363,20 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister));
// Advance to the next bytecode.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, x1, x2);
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, x1, x2,
+ &if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(x2, kInterpreterBytecodeOffsetRegister);
__ Str(x2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1381,7 +1394,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// Get the feedback vector.
Register feedback_vector = x2;
__ Ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
@@ -1394,7 +1407,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4, x5);
// Otherwise, tail call the SFI code.
- GenerateTailCallToSharedCode(masm);
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+ __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset));
+ __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(x2);
}
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
@@ -1423,7 +1440,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Do we have a valid feedback vector?
__ Ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
@@ -1613,7 +1630,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ JumpIfSmi(x0, &failed);
// Peek the argument count from the stack, untagging at the same time.
- __ Ldr(w4, UntagSmiMemOperand(__ StackPointer(), 3 * kPointerSize));
+ __ Ldr(w4, UntagSmiMemOperand(sp, 3 * kPointerSize));
__ Drop(4);
scope.GenerateLeaveFrame();
@@ -1646,7 +1663,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
kPointerSize;
// Set up frame pointer.
- __ Add(fp, __ StackPointer(), frame_size);
+ __ Add(fp, sp, frame_size);
if (with_result) {
// Overwrite the hole inserted by the deoptimizer with the return value from
@@ -1682,7 +1699,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
MemOperand(fp, BuiltinContinuationFrameConstants::kBuiltinOffset));
// Restore fp, lr.
- __ Mov(__ StackPointer(), fp);
+ __ Mov(sp, fp);
__ Pop(fp, lr);
// Call builtin.
@@ -2090,8 +2107,7 @@ void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ Push(x11, x1); // x1: function
__ SmiTag(x11, x0); // x0: number of arguments.
__ Push(x11, padreg);
- __ Add(fp, __ StackPointer(),
- ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp);
+ __ Add(fp, sp, ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp);
}
void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -2101,7 +2117,7 @@ void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Get the number of arguments passed (as a smi), tear down the frame and
// then drop the parameters and the receiver.
__ Ldr(x10, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ Mov(__ StackPointer(), fp);
+ __ Mov(sp, fp);
__ Pop(fp, lr);
// Drop actual parameters and receiver.
@@ -2194,7 +2210,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
// Make x10 the space we have left. The stack might already be overflowed
// here which will cause x10 to become negative.
- __ Sub(x10, masm->StackPointer(), x10);
+ __ Sub(x10, sp, x10);
// Check if the arguments will overflow the stack.
__ Cmp(x10, Operand(len, LSL, kPointerSizeLog2));
__ B(gt, &done); // Signed comparison.
@@ -2341,7 +2357,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestAndBranchIfAnySet(w3, SharedFunctionInfo::kClassConstructorMask,
+ __ TestAndBranchIfAnySet(w3, SharedFunctionInfo::IsClassConstructorBit::kMask,
&class_constructor);
// Enter the context of the function; ToObject has to run in the function
@@ -2467,7 +2483,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
// Make x10 the space we have left. The stack might already be overflowed
// here which will cause x10 to become negative.
- __ Sub(x10, masm->StackPointer(), x10);
+ __ Sub(x10, sp, x10);
// Check if the arguments will overflow the stack.
__ Cmp(x10, Operand(bound_argc, LSL, kPointerSizeLog2));
__ B(gt, &done); // Signed comparison.
@@ -2539,8 +2555,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
Register scratch = x10;
__ Tbz(bound_argc, 0, &done);
// Store receiver.
- __ Add(scratch, __ StackPointer(),
- Operand(total_argc, LSL, kPointerSizeLog2));
+ __ Add(scratch, sp, Operand(total_argc, LSL, kPointerSizeLog2));
__ Str(receiver, MemOperand(scratch, kPointerSize, PostIndex));
__ Tbnz(total_argc, 0, &done);
// Store padding.
@@ -2825,7 +2840,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Register argc_actual = x0; // Excluding the receiver.
Register argc_expected = x2; // Excluding the receiver.
Register function = x1;
- Register code_entry = x10;
Label dont_adapt_arguments, stack_overflow;
@@ -2854,7 +2868,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Bic(scratch1, scratch1, 1);
__ Claim(scratch1, kPointerSize);
- __ Mov(copy_to, __ StackPointer());
+ __ Mov(copy_to, sp);
// Preparing the expected arguments is done in four steps, the order of
// which is chosen so we can use LDP/STP and avoid conditional branches as
@@ -2918,8 +2932,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ RecordComment("-- Store receiver --");
__ Add(copy_from, fp, 2 * kPointerSize);
__ Ldr(scratch1, MemOperand(copy_from, argc_actual, LSL, kPointerSizeLog2));
- __ Str(scratch1,
- MemOperand(__ StackPointer(), argc_expected, LSL, kPointerSizeLog2));
+ __ Str(scratch1, MemOperand(sp, argc_expected, LSL, kPointerSizeLog2));
// Arguments have been adapted. Now call the entry point.
__ RecordComment("-- Call entry point --");
@@ -2927,9 +2940,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// x0 : expected number of arguments
// x1 : function (passed through to callee)
// x3 : new target (passed through to callee)
- __ Ldr(code_entry, FieldMemOperand(function, JSFunction::kCodeOffset));
- __ Add(code_entry, code_entry, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(code_entry);
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+ __ Ldr(x2, FieldMemOperand(function, JSFunction::kCodeOffset));
+ __ Add(x2, x2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(x2);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2941,9 +2955,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Call the entry point without adapting the arguments.
__ RecordComment("-- Call without adapting args --");
__ Bind(&dont_adapt_arguments);
- __ Ldr(code_entry, FieldMemOperand(function, JSFunction::kCodeOffset));
- __ Add(code_entry, code_entry, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(code_entry);
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+ __ Ldr(x2, FieldMemOperand(function, JSFunction::kCodeOffset));
+ __ Add(x2, x2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(x2);
__ Bind(&stack_overflow);
__ RecordComment("-- Stack overflow --");
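Editorial aside (not part of the patch): on both ARM ports above, AdvanceBytecodeOffset becomes AdvanceBytecodeOffsetOrReturn, which now recognizes return bytecodes itself instead of the caller comparing against kReturn. A rough TypeScript sketch of the emitted control flow; the helper names (sizeOf, isReturnBytecode) are placeholders for this sketch, not V8 APIs:

function advanceOrReturn(bytecodes: Uint8Array, offset: number,
                         sizeOf: (bytecode: number, scale: 1 | 2 | 4) => number,
                         isReturnBytecode: (bytecode: number) => boolean): number | 'return' {
  let bytecode = bytecodes[offset];
  let scale: 1 | 2 | 4 = 1;
  // Wide/ExtraWide prefixes: reload the real bytecode and switch to the scaled size table.
  if (bytecode === 0 /* kWide */ || bytecode === 1 /* kExtraWide */) {
    scale = bytecode === 0 ? 2 : 4;
    offset += 1;
    bytecode = bytecodes[offset];
  }
  // RETURN_BYTECODE_LIST check: bail out to the if_return label.
  if (isReturnBytecode(bytecode)) return 'return';
  // Otherwise advance past the current bytecode using the scaled size table.
  return offset + sizeOf(bytecode, scale);
}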
diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc
index d50e045069..971fb7c678 100644
--- a/deps/v8/src/builtins/builtins-api.cc
+++ b/deps/v8/src/builtins/builtins-api.cc
@@ -98,18 +98,13 @@ MUST_USE_RESULT MaybeHandle<Object> HandleApiCallHelper(
if (!raw_call_data->IsUndefined(isolate)) {
DCHECK(raw_call_data->IsCallHandlerInfo());
CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
- Object* callback_obj = call_data->callback();
- v8::FunctionCallback callback =
- v8::ToCData<v8::FunctionCallback>(callback_obj);
Object* data_obj = call_data->data();
- LOG(isolate, ApiObjectAccess("call", JSObject::cast(*js_receiver)));
FunctionCallbackArguments custom(isolate, data_obj, *function, raw_holder,
*new_target, &args[0] - 1,
args.length() - 1);
-
- Handle<Object> result = custom.Call(callback);
+ Handle<Object> result = custom.Call(call_data);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (result.is_null()) {
@@ -154,7 +149,7 @@ class RelocatableArguments : public BuiltinArguments, public Relocatable {
virtual inline void IterateInstance(RootVisitor* v) {
if (length() == 0) return;
- v->VisitRootPointers(Root::kRelocatable, lowest_address(),
+ v->VisitRootPointers(Root::kRelocatable, nullptr, lowest_address(),
highest_address() + 1);
}
@@ -256,12 +251,7 @@ MUST_USE_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
Object* handler =
constructor->shared()->get_api_func_data()->instance_call_handler();
DCHECK(!handler->IsUndefined(isolate));
- // TODO(ishell): remove this debugging code.
- CHECK(handler->IsCallHandlerInfo());
CallHandlerInfo* call_data = CallHandlerInfo::cast(handler);
- Object* callback_obj = call_data->callback();
- v8::FunctionCallback callback =
- v8::ToCData<v8::FunctionCallback>(callback_obj);
// Get the data for the call and perform the callback.
Object* result;
@@ -272,7 +262,7 @@ MUST_USE_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
FunctionCallbackArguments custom(isolate, call_data->data(), constructor,
obj, new_target, &args[0] - 1,
args.length() - 1);
- Handle<Object> result_handle = custom.Call(callback);
+ Handle<Object> result_handle = custom.Call(call_data);
if (result_handle.is_null()) {
result = isolate->heap()->undefined_value();
} else {
diff --git a/deps/v8/src/builtins/builtins-arguments-gen.cc b/deps/v8/src/builtins/builtins-arguments-gen.cc
index 7db8b971d7..2bf5e1c343 100644
--- a/deps/v8/src/builtins/builtins-arguments-gen.cc
+++ b/deps/v8/src/builtins/builtins-arguments-gen.cc
@@ -44,7 +44,7 @@ ArgumentsBuiltinsAssembler::GetArgumentsFrameAndCount(Node* function,
Node* formal_parameter_count =
LoadObjectField(shared, SharedFunctionInfo::kFormalParameterCountOffset,
MachineType::Int32());
- formal_parameter_count = Word32ToParameter(formal_parameter_count, mode);
+ formal_parameter_count = Int32ToParameter(formal_parameter_count, mode);
argument_count.Bind(formal_parameter_count);
Node* marker_or_function = LoadBufferObject(
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index 027baa2873..52a6222882 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -2,53 +2,50 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-iterator-gen.h"
#include "src/builtins/builtins-string-gen.h"
+#include "src/builtins/builtins-typedarray-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
#include "src/factory-inl.h"
#include "src/frame-constants.h"
+#include "src/builtins/builtins-array-gen.h"
+
namespace v8 {
namespace internal {
-class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
- public:
- explicit ArrayBuiltinCodeStubAssembler(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state),
- k_(this, MachineRepresentation::kTagged),
- a_(this, MachineRepresentation::kTagged),
- to_(this, MachineRepresentation::kTagged, SmiConstant(0)),
- fully_spec_compliant_(this, {&k_, &a_, &to_}) {}
-
- typedef std::function<void(ArrayBuiltinCodeStubAssembler* masm)>
- BuiltinResultGenerator;
-
- typedef std::function<Node*(ArrayBuiltinCodeStubAssembler* masm,
- Node* k_value, Node* k)>
- CallResultProcessor;
-
- typedef std::function<void(ArrayBuiltinCodeStubAssembler* masm)>
- PostLoopAction;
+using Node = compiler::Node;
- enum class MissingPropertyMode { kSkip, kUseUndefined };
+ArrayBuiltinsAssembler::ArrayBuiltinsAssembler(
+ compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state),
+ k_(this, MachineRepresentation::kTagged),
+ a_(this, MachineRepresentation::kTagged),
+ to_(this, MachineRepresentation::kTagged, SmiConstant(0)),
+ fully_spec_compliant_(this, {&k_, &a_, &to_}) {}
- void FindResultGenerator() { a_.Bind(UndefinedConstant()); }
+void ArrayBuiltinsAssembler::FindResultGenerator() {
+ a_.Bind(UndefinedConstant());
+}
- Node* FindProcessor(Node* k_value, Node* k) {
- Node* value = CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
- this_arg(), k_value, k, o());
- Label false_continue(this), return_true(this);
- BranchIfToBooleanIsTrue(value, &return_true, &false_continue);
- BIND(&return_true);
- ReturnFromBuiltin(k_value);
- BIND(&false_continue);
- return a();
+Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
+ Node* value = CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
+ this_arg(), k_value, k, o());
+ Label false_continue(this), return_true(this);
+ BranchIfToBooleanIsTrue(value, &return_true, &false_continue);
+ BIND(&return_true);
+ ReturnFromBuiltin(k_value);
+ BIND(&false_continue);
+ return a();
}
- void FindIndexResultGenerator() { a_.Bind(SmiConstant(-1)); }
+ void ArrayBuiltinsAssembler::FindIndexResultGenerator() {
+ a_.Bind(SmiConstant(-1));
+ }
- Node* FindIndexProcessor(Node* k_value, Node* k) {
+ Node* ArrayBuiltinsAssembler::FindIndexProcessor(Node* k_value, Node* k) {
Node* value = CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
this_arg(), k_value, k, o());
Label false_continue(this), return_true(this);
@@ -59,17 +56,21 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
return a();
}
- void ForEachResultGenerator() { a_.Bind(UndefinedConstant()); }
+ void ArrayBuiltinsAssembler::ForEachResultGenerator() {
+ a_.Bind(UndefinedConstant());
+ }
- Node* ForEachProcessor(Node* k_value, Node* k) {
+ Node* ArrayBuiltinsAssembler::ForEachProcessor(Node* k_value, Node* k) {
CallJS(CodeFactory::Call(isolate()), context(), callbackfn(), this_arg(),
k_value, k, o());
return a();
}
- void SomeResultGenerator() { a_.Bind(FalseConstant()); }
+ void ArrayBuiltinsAssembler::SomeResultGenerator() {
+ a_.Bind(FalseConstant());
+ }
- Node* SomeProcessor(Node* k_value, Node* k) {
+ Node* ArrayBuiltinsAssembler::SomeProcessor(Node* k_value, Node* k) {
Node* value = CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
this_arg(), k_value, k, o());
Label false_continue(this), return_true(this);
@@ -80,9 +81,11 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
return a();
}
- void EveryResultGenerator() { a_.Bind(TrueConstant()); }
+ void ArrayBuiltinsAssembler::EveryResultGenerator() {
+ a_.Bind(TrueConstant());
+ }
- Node* EveryProcessor(Node* k_value, Node* k) {
+ Node* ArrayBuiltinsAssembler::EveryProcessor(Node* k_value, Node* k) {
Node* value = CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
this_arg(), k_value, k, o());
Label true_continue(this), return_false(this);
@@ -93,9 +96,11 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
return a();
}
- void ReduceResultGenerator() { return a_.Bind(this_arg()); }
+ void ArrayBuiltinsAssembler::ReduceResultGenerator() {
+ return a_.Bind(this_arg());
+ }
- Node* ReduceProcessor(Node* k_value, Node* k) {
+ Node* ArrayBuiltinsAssembler::ReduceProcessor(Node* k_value, Node* k) {
VARIABLE(result, MachineRepresentation::kTagged);
Label done(this, {&result}), initial(this);
GotoIf(WordEqual(a(), TheHoleConstant()), &initial);
@@ -111,21 +116,21 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
return result.value();
}
- void ReducePostLoopAction() {
+ void ArrayBuiltinsAssembler::ReducePostLoopAction() {
Label ok(this);
GotoIf(WordNotEqual(a(), TheHoleConstant()), &ok);
ThrowTypeError(context(), MessageTemplate::kReduceNoInitial);
BIND(&ok);
}
- void FilterResultGenerator() {
+ void ArrayBuiltinsAssembler::FilterResultGenerator() {
// 7. Let A be ArraySpeciesCreate(O, 0).
// This version of ArraySpeciesCreate will create with the correct
// ElementsKind in the fast case.
- ArraySpeciesCreate();
+ GenerateArraySpeciesCreate();
}
- Node* FilterProcessor(Node* k_value, Node* k) {
+ Node* ArrayBuiltinsAssembler::FilterProcessor(Node* k_value, Node* k) {
// ii. Let selected be ToBoolean(? Call(callbackfn, T, kValue, k, O)).
Node* selected = CallJS(CodeFactory::Call(isolate()), context(),
callbackfn(), this_arg(), k_value, k, o());
@@ -191,11 +196,19 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
return a();
}
- void MapResultGenerator() { ArraySpeciesCreate(len_); }
+ void ArrayBuiltinsAssembler::MapResultGenerator() {
+ GenerateArraySpeciesCreate(len_);
+ }
- void TypedArrayMapResultGenerator() {
+ void ArrayBuiltinsAssembler::TypedArrayMapResultGenerator() {
// 6. Let A be ? TypedArraySpeciesCreate(O, len).
- Node* a = TypedArraySpeciesCreateByLength(context(), o(), len_);
+ TNode<JSTypedArray> original_array = CAST(o());
+ TNode<Smi> length = CAST(len_);
+ const char* method_name = "%TypedArray%.prototype.map";
+
+ TypedArrayBuiltinsAssembler typedarray_asm(state());
+ TNode<JSTypedArray> a = typedarray_asm.SpeciesCreateByLength(
+ context(), original_array, length, method_name);
// In the Spec and our current implementation, the length check is already
// performed in TypedArraySpeciesCreate.
CSA_ASSERT(this,
@@ -206,7 +219,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
a_.Bind(a);
}
- Node* SpecCompliantMapProcessor(Node* k_value, Node* k) {
+ Node* ArrayBuiltinsAssembler::SpecCompliantMapProcessor(Node* k_value,
+ Node* k) {
// i. Let kValue be ? Get(O, Pk). Performed by the caller of
// SpecCompliantMapProcessor.
// ii. Let mapped_value be ? Call(callbackfn, T, kValue, k, O).
@@ -218,7 +232,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
return a();
}
- Node* FastMapProcessor(Node* k_value, Node* k) {
+ Node* ArrayBuiltinsAssembler::FastMapProcessor(Node* k_value, Node* k) {
// i. Let kValue be ? Get(O, Pk). Performed by the caller of
// FastMapProcessor.
// ii. Let mapped_value be ? Call(callbackfn, T, kValue, k, O).
@@ -312,7 +326,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
}
// See tc39.github.io/ecma262/#sec-%typedarray%.prototype.map.
- Node* TypedArrayMapProcessor(Node* k_value, Node* k) {
+ Node* ArrayBuiltinsAssembler::TypedArrayMapProcessor(Node* k_value, Node* k) {
// 8. c. Let mapped_value be ? Call(callbackfn, T, « kValue, k, O »).
Node* mapped_value = CallJS(CodeFactory::Call(isolate()), context(),
callbackfn(), this_arg(), k_value, k, o());
@@ -325,11 +339,21 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
Branch(fast_typed_array_target_, &fast, &slow);
BIND(&fast);
- // #sec-integerindexedelementset 3. Let numValue be ? ToNumber(value).
- Node* num_value = ToNumber(context(), mapped_value);
+ // #sec-integerindexedelementset
+ // 5. If arrayTypeName is "BigUint64Array" or "BigInt64Array", let
+ // numValue be ? ToBigInt(v).
+ // 6. Otherwise, let numValue be ? ToNumber(value).
+ Node* num_value;
+ if (source_elements_kind_ == BIGINT64_ELEMENTS ||
+ source_elements_kind_ == BIGUINT64_ELEMENTS) {
+ num_value = ToBigInt(context(), mapped_value);
+ } else {
+ num_value = ToNumber_Inline(context(), mapped_value);
+ }
// The only way how this can bailout is because of a detached buffer.
EmitElementStore(a(), k, num_value, false, source_elements_kind_,
- KeyedAccessStoreMode::STANDARD_STORE, &detached);
+ KeyedAccessStoreMode::STANDARD_STORE, &detached,
+ context());
Goto(&done);
BIND(&slow);
@@ -339,28 +363,16 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
BIND(&detached);
// tc39.github.io/ecma262/#sec-integerindexedelementset
- // 5. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
+ // 8. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
ThrowTypeError(context_, MessageTemplate::kDetachedOperation, name_);
BIND(&done);
return a();
}
- void NullPostLoopAction() {}
+ void ArrayBuiltinsAssembler::NullPostLoopAction() {}
- protected:
- Node* context() { return context_; }
- Node* receiver() { return receiver_; }
- Node* new_target() { return new_target_; }
- Node* argc() { return argc_; }
- Node* o() { return o_; }
- Node* len() { return len_; }
- Node* callbackfn() { return callbackfn_; }
- Node* this_arg() { return this_arg_; }
- Node* k() { return k_.value(); }
- Node* a() { return a_.value(); }
-
- void ReturnFromBuiltin(Node* value) {
+ void ArrayBuiltinsAssembler::ReturnFromBuiltin(Node* value) {
if (argc_ == nullptr) {
Return(value);
} else {
@@ -370,9 +382,9 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
}
}
- void InitIteratingArrayBuiltinBody(Node* context, Node* receiver,
- Node* callbackfn, Node* this_arg,
- Node* new_target, Node* argc) {
+ void ArrayBuiltinsAssembler::InitIteratingArrayBuiltinBody(
+ TNode<Context> context, TNode<Object> receiver, Node* callbackfn,
+ Node* this_arg, Node* new_target, TNode<IntPtrT> argc) {
context_ = context;
receiver_ = receiver;
new_target_ = new_target;
@@ -381,12 +393,11 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
argc_ = argc;
}
- void GenerateIteratingArrayBuiltinBody(
+ void ArrayBuiltinsAssembler::GenerateIteratingArrayBuiltinBody(
const char* name, const BuiltinResultGenerator& generator,
const CallResultProcessor& processor, const PostLoopAction& action,
const Callable& slow_case_continuation,
- MissingPropertyMode missing_property_mode,
- ForEachDirection direction = ForEachDirection::kForward) {
+ MissingPropertyMode missing_property_mode, ForEachDirection direction) {
Label non_array(this), array_changes(this, {&k_, &a_, &to_});
// TODO(danno): Seriously? Do we really need to throw the exact error
@@ -398,7 +409,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
// 1. Let O be ToObject(this value).
// 2. ReturnIfAbrupt(O)
- o_ = CallBuiltin(Builtins::kToObject, context(), receiver());
+ o_ = ToObject(context(), receiver());
// 3. Let len be ToLength(Get(O, "length")).
// 4. ReturnIfAbrupt(len).
@@ -453,15 +464,12 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
ReturnFromBuiltin(result);
}
- void InitIteratingArrayBuiltinLoopContinuation(Node* context, Node* receiver,
- Node* callbackfn,
- Node* this_arg, Node* a,
- Node* o, Node* initial_k,
- Node* len, Node* to) {
+ void ArrayBuiltinsAssembler::InitIteratingArrayBuiltinLoopContinuation(
+ TNode<Context> context, TNode<Object> receiver, Node* callbackfn,
+ Node* this_arg, Node* a, Node* o, Node* initial_k, Node* len, Node* to) {
context_ = context;
this_arg_ = this_arg;
callbackfn_ = callbackfn;
- argc_ = nullptr;
a_.Bind(a);
k_.Bind(initial_k);
o_ = o;
@@ -469,10 +477,10 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
to_.Bind(to);
}
- void GenerateIteratingTypedArrayBuiltinBody(
+ void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody(
const char* name, const BuiltinResultGenerator& generator,
const CallResultProcessor& processor, const PostLoopAction& action,
- ForEachDirection direction = ForEachDirection::kForward) {
+ ForEachDirection direction) {
name_ = name;
// ValidateTypedArray: tc39.github.io/ecma262/#sec-validatetypedarray
@@ -525,6 +533,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
BIND(&distinguish_types);
+ generator(this);
+
if (direction == ForEachDirection::kForward) {
k_.Bind(SmiConstant(0));
} else {
@@ -539,7 +549,6 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
Label done(this);
source_elements_kind_ = ElementsKindForInstanceType(
static_cast<InstanceType>(instance_types[i]));
- generator(this);
// TODO(tebbi): Silently cancelling the loop on buffer detachment is a
// spec violation. Should go to &throw_detached and throw a TypeError
// instead.
@@ -552,10 +561,9 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
}
}
- void GenerateIteratingArrayBuiltinLoopContinuation(
+ void ArrayBuiltinsAssembler::GenerateIteratingArrayBuiltinLoopContinuation(
const CallResultProcessor& processor, const PostLoopAction& action,
- MissingPropertyMode missing_property_mode,
- ForEachDirection direction = ForEachDirection::kForward) {
+ MissingPropertyMode missing_property_mode, ForEachDirection direction) {
Label loop(this, {&k_, &a_, &to_});
Label after_loop(this);
Goto(&loop);
@@ -613,8 +621,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
Return(a_.value());
}
- private:
- static ElementsKind ElementsKindForInstanceType(InstanceType type) {
+ ElementsKind ArrayBuiltinsAssembler::ElementsKindForInstanceType(
+ InstanceType type) {
switch (type) {
#define INSTANCE_TYPE_TO_ELEMENTS_KIND(Type, type, TYPE, ctype, size) \
case FIXED_##TYPE##_ARRAY_TYPE: \
@@ -628,9 +636,9 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
}
}
- void VisitAllTypedArrayElements(Node* array_buffer,
- const CallResultProcessor& processor,
- Label* detached, ForEachDirection direction) {
+ void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
+ Node* array_buffer, const CallResultProcessor& processor, Label* detached,
+ ForEachDirection direction) {
VariableList list({&a_, &k_, &to_}, zone());
FastLoopBody body = [&](Node* index) {
@@ -660,11 +668,10 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
advance_mode);
}
- void VisitAllFastElementsOneKind(ElementsKind kind,
- const CallResultProcessor& processor,
- Label* array_changed, ParameterMode mode,
- ForEachDirection direction,
- MissingPropertyMode missing_property_mode) {
+ void ArrayBuiltinsAssembler::VisitAllFastElementsOneKind(
+ ElementsKind kind, const CallResultProcessor& processor,
+ Label* array_changed, ParameterMode mode, ForEachDirection direction,
+ MissingPropertyMode missing_property_mode) {
Comment("begin VisitAllFastElementsOneKind");
VARIABLE(original_map, MachineRepresentation::kTagged);
original_map.Bind(LoadMap(o()));
@@ -735,10 +742,10 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
Comment("end VisitAllFastElementsOneKind");
}
- void HandleFastElements(const CallResultProcessor& processor,
- const PostLoopAction& action, Label* slow,
- ForEachDirection direction,
- MissingPropertyMode missing_property_mode) {
+ void ArrayBuiltinsAssembler::HandleFastElements(
+ const CallResultProcessor& processor, const PostLoopAction& action,
+ Label* slow, ForEachDirection direction,
+ MissingPropertyMode missing_property_mode) {
Label switch_on_elements_kind(this), fast_elements(this),
maybe_double_elements(this), fast_double_elements(this);
@@ -788,7 +795,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
// Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
// This version is specialized to create a zero length array
// of the elements kind of the input array.
- void ArraySpeciesCreate() {
+ void ArrayBuiltinsAssembler::GenerateArraySpeciesCreate() {
Label runtime(this, Label::kDeferred), done(this);
TNode<Smi> len = SmiConstant(0);
@@ -810,9 +817,9 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
// Respect the ElementsKind of the input array.
TNode<Int32T> elements_kind = LoadMapElementsKind(original_map);
GotoIfNot(IsFastElementsKind(elements_kind), &runtime);
- TNode<Context> native_context = CAST(LoadNativeContext(context()));
+ TNode<Context> native_context = LoadNativeContext(context());
TNode<Map> array_map =
- CAST(LoadJSArrayElementsMap(elements_kind, native_context));
+ LoadJSArrayElementsMap(elements_kind, native_context);
TNode<JSArray> array =
CAST(AllocateJSArray(GetInitialFastElementsKind(), array_map, len, len,
nullptr, CodeStubAssembler::SMI_PARAMETERS));
@@ -834,7 +841,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
}
// Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
- void ArraySpeciesCreate(SloppyTNode<Smi> len) {
+ void ArrayBuiltinsAssembler::GenerateArraySpeciesCreate(
+ SloppyTNode<Smi> len) {
Label runtime(this, Label::kDeferred), done(this);
Node* const original_map = LoadMap(o());
@@ -860,9 +868,9 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
// element in the input array (maybe the callback deletes an element).
const ElementsKind elements_kind =
GetHoleyElementsKind(GetInitialFastElementsKind());
- TNode<Context> native_context = CAST(LoadNativeContext(context()));
+ TNode<Context> native_context = LoadNativeContext(context());
TNode<Map> array_map =
- CAST(LoadJSArrayElementsMap(elements_kind, native_context));
+ LoadJSArrayElementsMap(elements_kind, native_context);
a_.Bind(AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, len, len, nullptr,
CodeStubAssembler::SMI_PARAMETERS));
@@ -881,30 +889,14 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
BIND(&done);
}
- Node* callbackfn_ = nullptr;
- Node* o_ = nullptr;
- Node* this_arg_ = nullptr;
- Node* len_ = nullptr;
- Node* context_ = nullptr;
- Node* receiver_ = nullptr;
- Node* new_target_ = nullptr;
- Node* argc_ = nullptr;
- Node* fast_typed_array_target_ = nullptr;
- const char* name_ = nullptr;
- Variable k_;
- Variable a_;
- Variable to_;
- Label fully_spec_compliant_;
- ElementsKind source_elements_kind_ = ElementsKind::NO_ELEMENTS;
-};
-
TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
- Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Label runtime(this, Label::kDeferred);
Label fast(this);
@@ -920,16 +912,18 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
BIND(&fast);
{
- CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(receiver)));
- Node* length = LoadAndUntagObjectField(receiver, JSArray::kLengthOffset);
+ TNode<JSArray> array_receiver = CAST(receiver);
+ CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(array_receiver)));
+ Node* length =
+ LoadAndUntagObjectField(array_receiver, JSArray::kLengthOffset);
Label return_undefined(this), fast_elements(this);
GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &return_undefined);
// 2) Ensure that the length is writable.
- EnsureArrayLengthWritable(LoadMap(receiver), &runtime);
+ EnsureArrayLengthWritable(LoadMap(array_receiver), &runtime);
// 3) Check that the elements backing store isn't copy-on-write.
- Node* elements = LoadElements(receiver);
+ Node* elements = LoadElements(array_receiver);
GotoIf(WordEqual(LoadMap(elements),
LoadRoot(Heap::kFixedCOWArrayMapRootIndex)),
&runtime);
@@ -945,10 +939,10 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
capacity),
&runtime);
- StoreObjectFieldNoWriteBarrier(receiver, JSArray::kLengthOffset,
+ StoreObjectFieldNoWriteBarrier(array_receiver, JSArray::kLengthOffset,
SmiTag(new_length));
- Node* elements_kind = LoadMapElementsKind(LoadMap(receiver));
+ Node* elements_kind = LoadMapElementsKind(LoadMap(array_receiver));
GotoIf(Int32LessThanOrEqual(elements_kind,
Int32Constant(TERMINAL_FAST_ELEMENTS_KIND)),
&fast_elements);
@@ -1008,12 +1002,14 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
// TODO(ishell): use constants from Descriptor once the JSFunction linkage
// arguments are reordered.
- Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
+ TNode<JSArray> array_receiver;
Node* kind = nullptr;
Label fast(this);
@@ -1021,13 +1017,14 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
BIND(&fast);
{
+ array_receiver = CAST(receiver);
arg_index = IntPtrConstant(0);
- kind = EnsureArrayPushable(receiver, &runtime);
+ kind = EnsureArrayPushable(array_receiver, &runtime);
GotoIf(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS),
&object_push_pre);
- Node* new_length = BuildAppendJSArray(PACKED_SMI_ELEMENTS, receiver, &args,
- &arg_index, &smi_transition);
+ Node* new_length = BuildAppendJSArray(PACKED_SMI_ELEMENTS, array_receiver,
+ &args, &arg_index, &smi_transition);
args.PopAndReturn(new_length);
}
@@ -1037,17 +1034,17 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
// the most generic implementation for the rest of the array.
BIND(&smi_transition);
{
- Node* arg = args.AtIndex(arg_index);
+ Node* arg = args.AtIndex(arg_index.value());
GotoIf(TaggedIsSmi(arg), &default_label);
- Node* length = LoadJSArrayLength(receiver);
+ Node* length = LoadJSArrayLength(array_receiver);
// TODO(danno): Use the KeyedStoreGeneric stub here when possible,
// calling into the runtime to do the elements transition is overkill.
- CallRuntime(Runtime::kSetProperty, context, receiver, length, arg,
+ CallRuntime(Runtime::kSetProperty, context, array_receiver, length, arg,
SmiConstant(LanguageMode::kStrict));
Increment(&arg_index);
// The runtime SetProperty call could have converted the array to dictionary
// mode, which must be detected to abort the fast-path.
- Node* map = LoadMap(receiver);
+ Node* map = LoadMap(array_receiver);
Node* bit_field2 = LoadMapBitField2(map);
Node* kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
GotoIf(Word32Equal(kind, Int32Constant(DICTIONARY_ELEMENTS)),
@@ -1065,16 +1062,16 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
BIND(&object_push);
{
- Node* new_length = BuildAppendJSArray(PACKED_ELEMENTS, receiver, &args,
- &arg_index, &default_label);
+ Node* new_length = BuildAppendJSArray(PACKED_ELEMENTS, array_receiver,
+ &args, &arg_index, &default_label);
args.PopAndReturn(new_length);
}
BIND(&double_push);
{
Node* new_length =
- BuildAppendJSArray(PACKED_DOUBLE_ELEMENTS, receiver, &args, &arg_index,
- &double_transition);
+ BuildAppendJSArray(PACKED_DOUBLE_ELEMENTS, array_receiver, &args,
+ &arg_index, &double_transition);
args.PopAndReturn(new_length);
}
@@ -1084,17 +1081,17 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
// on the most generic implementation for the rest of the array.
BIND(&double_transition);
{
- Node* arg = args.AtIndex(arg_index);
+ Node* arg = args.AtIndex(arg_index.value());
GotoIfNumber(arg, &default_label);
- Node* length = LoadJSArrayLength(receiver);
+ Node* length = LoadJSArrayLength(array_receiver);
// TODO(danno): Use the KeyedStoreGeneric stub here when possible,
// calling into the runtime to do the elements transition is overkill.
- CallRuntime(Runtime::kSetProperty, context, receiver, length, arg,
+ CallRuntime(Runtime::kSetProperty, context, array_receiver, length, arg,
SmiConstant(LanguageMode::kStrict));
Increment(&arg_index);
// The runtime SetProperty call could have converted the array to dictionary
// mode, which must be detected to abort the fast-path.
- Node* map = LoadMap(receiver);
+ Node* map = LoadMap(array_receiver);
Node* bit_field2 = LoadMapBitField2(map);
Node* kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
GotoIf(Word32Equal(kind, Int32Constant(DICTIONARY_ELEMENTS)),
@@ -1107,13 +1104,13 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
BIND(&default_label);
{
args.ForEach(
- [this, receiver, context](Node* arg) {
- Node* length = LoadJSArrayLength(receiver);
- CallRuntime(Runtime::kSetProperty, context, receiver, length, arg,
- SmiConstant(LanguageMode::kStrict));
+ [this, array_receiver, context](Node* arg) {
+ Node* length = LoadJSArrayLength(array_receiver);
+ CallRuntime(Runtime::kSetProperty, context, array_receiver, length,
+ arg, SmiConstant(LanguageMode::kStrict));
},
- arg_index);
- args.PopAndReturn(LoadJSArrayLength(receiver));
+ arg_index.value());
+ args.PopAndReturn(LoadJSArrayLength(array_receiver));
}
BIND(&runtime);
@@ -1131,8 +1128,8 @@ class ArrayPrototypeSliceCodeStubAssembler : public CodeStubAssembler {
compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
- Node* HandleFastSlice(Node* context, Node* array, Node* from, Node* count,
- Label* slow) {
+ Node* HandleFastSlice(TNode<Context> context, Node* array, Node* from,
+ Node* count, Label* slow) {
VARIABLE(result, MachineRepresentation::kTagged);
Label done(this);
@@ -1262,7 +1259,8 @@ class ArrayPrototypeSliceCodeStubAssembler : public CodeStubAssembler {
return result.value();
}
- void CopyOneElement(Node* context, Node* o, Node* a, Node* p_k, Variable& n) {
+ void CopyOneElement(TNode<Context> context, Node* o, Node* a, Node* p_k,
+ Variable& n) {
// b. Let kPresent be HasProperty(O, Pk).
// c. ReturnIfAbrupt(kPresent).
TNode<Oddball> k_present = HasProperty(o, p_k, context, kHasProperty);
@@ -1291,9 +1289,9 @@ TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
Label slow(this, Label::kDeferred), fast_elements_kind(this);
CodeStubArguments args(this, argc);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
- VARIABLE(o, MachineRepresentation::kTagged);
+ TVARIABLE(JSReceiver, o);
VARIABLE(len, MachineRepresentation::kTagged);
Label length_done(this), generic_length(this), check_arguments_length(this),
load_arguments_length(this);
@@ -1301,8 +1299,9 @@ TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
GotoIf(TaggedIsSmi(receiver), &generic_length);
GotoIfNot(IsJSArray(receiver), &check_arguments_length);
- o.Bind(receiver);
- len.Bind(LoadJSArrayLength(receiver));
+ TNode<JSArray> array_receiver = CAST(receiver);
+ o = array_receiver;
+ len.Bind(LoadJSArrayLength(array_receiver));
// Check for the array clone case. There can be no arguments to slice, the
// array prototype chain must be intact and have no elements, the array has to
@@ -1318,7 +1317,7 @@ TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
BIND(&check_arguments_length);
- Node* map = LoadMap(receiver);
+ Node* map = LoadMap(array_receiver);
Node* native_context = LoadNativeContext(context);
GotoIfContextElementEqual(map, native_context,
Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX,
@@ -1337,16 +1336,16 @@ TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
BIND(&load_arguments_length);
Node* arguments_length =
- LoadObjectField(receiver, JSArgumentsObject::kLengthOffset);
+ LoadObjectField(array_receiver, JSArgumentsObject::kLengthOffset);
GotoIf(TaggedIsNotSmi(arguments_length), &generic_length);
- o.Bind(receiver);
+ o = CAST(receiver);
len.Bind(arguments_length);
Goto(&length_done);
BIND(&generic_length);
// 1. Let O be ToObject(this value).
// 2. ReturnIfAbrupt(O).
- o.Bind(CallBuiltin(Builtins::kToObject, context, receiver));
+ o = ToObject(context, receiver);
// 3. Let len be ToLength(Get(O, "length")).
// 4. ReturnIfAbrupt(len).
@@ -1359,7 +1358,7 @@ TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
// 5. Let relativeStart be ToInteger(start).
// 6. ReturnIfAbrupt(relativeStart).
- TNode<Object> arg0 = CAST(args.GetOptionalArgumentValue(0, SmiConstant(0)));
+ TNode<Object> arg0 = args.GetOptionalArgumentValue(0, SmiConstant(0));
Node* relative_start = ToInteger_Inline(context, arg0);
// 7. If relativeStart < 0, let k be max((len + relativeStart),0);
@@ -1378,8 +1377,7 @@ TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
// 8. If end is undefined, let relativeEnd be len;
// else let relativeEnd be ToInteger(end).
// 9. ReturnIfAbrupt(relativeEnd).
- TNode<Object> end =
- CAST(args.GetOptionalArgumentValue(1, UndefinedConstant()));
+ TNode<Object> end = args.GetOptionalArgumentValue(1, UndefinedConstant());
Label end_undefined(this), end_done(this);
VARIABLE(relative_end, MachineRepresentation::kTagged);
GotoIf(WordEqual(end, UndefinedConstant()), &end_undefined);
@@ -1460,12 +1458,13 @@ TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
}
TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) {
- Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Label runtime(this, Label::kDeferred);
Label fast(this);
@@ -1482,17 +1481,19 @@ TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) {
BIND(&fast);
{
- CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(receiver)));
- Node* length = LoadAndUntagObjectField(receiver, JSArray::kLengthOffset);
+ TNode<JSArray> array_receiver = CAST(receiver);
+ CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(array_receiver)));
+ Node* length =
+ LoadAndUntagObjectField(array_receiver, JSArray::kLengthOffset);
Label return_undefined(this), fast_elements_tagged(this),
fast_elements_smi(this);
GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &return_undefined);
// 2) Ensure that the length is writable.
- EnsureArrayLengthWritable(LoadMap(receiver), &runtime);
+ EnsureArrayLengthWritable(LoadMap(array_receiver), &runtime);
// 3) Check that the elements backing store isn't copy-on-write.
- Node* elements = LoadElements(receiver);
+ Node* elements = LoadElements(array_receiver);
GotoIf(WordEqual(LoadMap(elements),
LoadRoot(Heap::kFixedCOWArrayMapRootIndex)),
&runtime);
@@ -1514,10 +1515,10 @@ TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) {
IntPtrConstant(JSArray::kMaxCopyElements)),
&runtime);
- StoreObjectFieldNoWriteBarrier(receiver, JSArray::kLengthOffset,
+ StoreObjectFieldNoWriteBarrier(array_receiver, JSArray::kLengthOffset,
SmiTag(new_length));
- Node* elements_kind = LoadMapElementsKind(LoadMap(receiver));
+ Node* elements_kind = LoadMapElementsKind(LoadMap(array_receiver));
GotoIf(
Int32LessThanOrEqual(elements_kind, Int32Constant(HOLEY_SMI_ELEMENTS)),
&fast_elements_smi);
@@ -1616,9 +1617,9 @@ TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) {
}
}
-TF_BUILTIN(ExtractFastJSArray, ArrayBuiltinCodeStubAssembler) {
+TF_BUILTIN(ExtractFastJSArray, ArrayBuiltinsAssembler) {
ParameterMode mode = OptimalParameterMode();
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* array = Parameter(Descriptor::kSource);
Node* begin = TaggedToParameter(Parameter(Descriptor::kBegin), mode);
Node* count = TaggedToParameter(Parameter(Descriptor::kCount), mode);
@@ -1629,8 +1630,8 @@ TF_BUILTIN(ExtractFastJSArray, ArrayBuiltinCodeStubAssembler) {
Return(ExtractFastJSArray(context, array, begin, count, mode));
}
-TF_BUILTIN(CloneFastJSArray, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
+TF_BUILTIN(CloneFastJSArray, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* array = Parameter(Descriptor::kSource);
CSA_ASSERT(this, IsJSArray(array));
@@ -1640,9 +1641,9 @@ TF_BUILTIN(CloneFastJSArray, ArrayBuiltinCodeStubAssembler) {
Return(CloneFastJSArray(context, array, mode));
}
-TF_BUILTIN(ArrayFindLoopContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayFindLoopContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -1656,16 +1657,16 @@ TF_BUILTIN(ArrayFindLoopContinuation, ArrayBuiltinCodeStubAssembler) {
len, to);
GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinCodeStubAssembler::FindProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ &ArrayBuiltinsAssembler::FindProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction,
MissingPropertyMode::kUseUndefined, ForEachDirection::kForward);
}
// Continuation that is called after an eager deoptimization from TF (ex. the
// array changes during iteration).
-TF_BUILTIN(ArrayFindLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayFindLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -1678,9 +1679,9 @@ TF_BUILTIN(ArrayFindLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
// Continuation that is called after a lazy deoptimization from TF (ex. the
// callback function is no longer callable).
-TF_BUILTIN(ArrayFindLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayFindLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -1695,9 +1696,9 @@ TF_BUILTIN(ArrayFindLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
// right after the callback and it's returned value must be handled before
// iteration continues.
TF_BUILTIN(ArrayFindLoopAfterCallbackLazyDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -1720,13 +1721,13 @@ TF_BUILTIN(ArrayFindLoopAfterCallbackLazyDeoptContinuation,
}
// ES #sec-get-%typedarray%.prototype.find
-TF_BUILTIN(ArrayPrototypeFind, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(ArrayPrototypeFind, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -1734,17 +1735,16 @@ TF_BUILTIN(ArrayPrototypeFind, ArrayBuiltinCodeStubAssembler) {
new_target, argc);
GenerateIteratingArrayBuiltinBody(
- "Array.prototype.find",
- &ArrayBuiltinCodeStubAssembler::FindResultGenerator,
- &ArrayBuiltinCodeStubAssembler::FindProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ "Array.prototype.find", &ArrayBuiltinsAssembler::FindResultGenerator,
+ &ArrayBuiltinsAssembler::FindProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction,
Builtins::CallableFor(isolate(), Builtins::kArrayFindLoopContinuation),
MissingPropertyMode::kUseUndefined, ForEachDirection::kForward);
}
-TF_BUILTIN(ArrayFindIndexLoopContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayFindIndexLoopContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -1758,15 +1758,14 @@ TF_BUILTIN(ArrayFindIndexLoopContinuation, ArrayBuiltinCodeStubAssembler) {
len, to);
GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinCodeStubAssembler::FindIndexProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ &ArrayBuiltinsAssembler::FindIndexProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction,
MissingPropertyMode::kUseUndefined, ForEachDirection::kForward);
}
-TF_BUILTIN(ArrayFindIndexLoopEagerDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayFindIndexLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -1777,10 +1776,9 @@ TF_BUILTIN(ArrayFindIndexLoopEagerDeoptContinuation,
initial_k, len, UndefinedConstant()));
}
-TF_BUILTIN(ArrayFindIndexLoopLazyDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayFindIndexLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -1792,9 +1790,9 @@ TF_BUILTIN(ArrayFindIndexLoopLazyDeoptContinuation,
}
TF_BUILTIN(ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -1817,13 +1815,13 @@ TF_BUILTIN(ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation,
}
// ES #sec-get-%typedarray%.prototype.findIndex
-TF_BUILTIN(ArrayPrototypeFindIndex, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(ArrayPrototypeFindIndex, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -1832,22 +1830,393 @@ TF_BUILTIN(ArrayPrototypeFindIndex, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingArrayBuiltinBody(
"Array.prototype.findIndex",
- &ArrayBuiltinCodeStubAssembler::FindIndexResultGenerator,
- &ArrayBuiltinCodeStubAssembler::FindIndexProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ &ArrayBuiltinsAssembler::FindIndexResultGenerator,
+ &ArrayBuiltinsAssembler::FindIndexProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction,
Builtins::CallableFor(isolate(),
Builtins::kArrayFindIndexLoopContinuation),
MissingPropertyMode::kUseUndefined, ForEachDirection::kForward);
}
+class ArrayPopulatorAssembler : public CodeStubAssembler {
+ public:
+ explicit ArrayPopulatorAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ TNode<Object> ConstructArrayLike(TNode<Context> context,
+ TNode<Object> receiver) {
+ TVARIABLE(Object, array);
+ Label is_constructor(this), is_not_constructor(this), done(this);
+ GotoIf(TaggedIsSmi(receiver), &is_not_constructor);
+ Branch(IsConstructor(receiver), &is_constructor, &is_not_constructor);
+
+ BIND(&is_constructor);
+ {
+ array = CAST(
+ ConstructJS(CodeFactory::Construct(isolate()), context, receiver));
+ Goto(&done);
+ }
+
+ BIND(&is_not_constructor);
+ {
+ Label allocate_js_array(this);
+
+ TNode<Map> array_map = CAST(LoadContextElement(
+ context, Context::JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX));
+
+ array = CAST(AllocateJSArray(PACKED_SMI_ELEMENTS, array_map,
+ SmiConstant(0), SmiConstant(0), nullptr,
+ ParameterMode::SMI_PARAMETERS));
+ Goto(&done);
+ }
+
+ BIND(&done);
+ return array.value();
+ }
+
+ TNode<Object> ConstructArrayLike(TNode<Context> context,
+ TNode<Object> receiver,
+ TNode<Number> length) {
+ TVARIABLE(Object, array);
+ Label is_constructor(this), is_not_constructor(this), done(this);
+ CSA_ASSERT(this, IsNumberNormalized(length));
+ GotoIf(TaggedIsSmi(receiver), &is_not_constructor);
+ Branch(IsConstructor(receiver), &is_constructor, &is_not_constructor);
+
+ BIND(&is_constructor);
+ {
+ array = CAST(ConstructJS(CodeFactory::Construct(isolate()), context,
+ receiver, length));
+ Goto(&done);
+ }
+
+ BIND(&is_not_constructor);
+ {
+ Label allocate_js_array(this);
+
+ Label next(this), runtime(this, Label::kDeferred);
+ TNode<Smi> limit = SmiConstant(JSArray::kInitialMaxFastElementArray);
+ CSA_ASSERT_BRANCH(this, [=](Label* ok, Label* not_ok) {
+ BranchIfNumberRelationalComparison(Operation::kGreaterThanOrEqual,
+ length, SmiConstant(0), ok, not_ok);
+ });
+ // This check also transitively covers the case where length is too big
+ // to be representable by a SMI and so is not usable with
+ // AllocateJSArray.
+ BranchIfNumberRelationalComparison(Operation::kGreaterThanOrEqual, length,
+ limit, &runtime, &next);
+
+ BIND(&runtime);
+ {
+ TNode<Context> native_context = LoadNativeContext(context);
+ TNode<JSFunction> array_function = CAST(
+ LoadContextElement(native_context, Context::ARRAY_FUNCTION_INDEX));
+ array = CallRuntime(Runtime::kNewArray, context, array_function, length,
+ array_function, UndefinedConstant());
+ Goto(&done);
+ }
+
+ BIND(&next);
+ CSA_ASSERT(this, TaggedIsSmi(length));
+
+ TNode<Map> array_map = CAST(LoadContextElement(
+ context, Context::JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX));
+
+ // TODO(delphick): Consider using
+ // AllocateUninitializedJSArrayWithElements to avoid initializing an
+ // array and then writing over it.
+ array = CAST(AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, length,
+ SmiConstant(0), nullptr,
+ ParameterMode::SMI_PARAMETERS));
+ Goto(&done);
+ }
+
+ BIND(&done);
+ return array.value();
+ }
+
+ void GenerateSetLength(TNode<Context> context, TNode<Object> array,
+ TNode<Number> length) {
+ Label fast(this), runtime(this), done(this);
+ // TODO(delphick): We should be able to skip the fast set altogether, if the
+ // length already equals the expected length, which it always is now on the
+ // fast path.
+ // Only set the length in this stub if
+ // 1) the array has fast elements,
+ // 2) the length is writable,
+ // 3) the new length is equal to the old length.
+
+ // 1) Check that the array has fast elements.
+ // TODO(delphick): Consider changing this since it does an an unnecessary
+ // check for SMIs.
+ // TODO(delphick): Also we could hoist this to after the array construction
+ // and copy the args into array in the same way as the Array constructor.
+ BranchIfFastJSArray(array, context, &fast, &runtime);
+
+ BIND(&fast);
+ {
+ TNode<JSArray> fast_array = CAST(array);
+
+ TNode<Smi> length_smi = CAST(length);
+ TNode<Smi> old_length = LoadFastJSArrayLength(fast_array);
+ CSA_ASSERT(this, TaggedIsPositiveSmi(old_length));
+
+ // 2) Ensure that the length is writable.
+ // TODO(delphick): This check may be redundant due to the
+ // BranchIfFastJSArray above.
+ EnsureArrayLengthWritable(LoadMap(fast_array), &runtime);
+
+ // 3) If the created array's length does not match the required length,
+ // then use the runtime to set the property as that will insert holes
+ // into excess elements or shrink the backing store as appropriate.
+ GotoIf(SmiNotEqual(length_smi, old_length), &runtime);
+
+ StoreObjectFieldNoWriteBarrier(fast_array, JSArray::kLengthOffset,
+ length_smi);
+
+ Goto(&done);
+ }
+
+ BIND(&runtime);
+ {
+ CallRuntime(Runtime::kSetProperty, context, static_cast<Node*>(array),
+ CodeStubAssembler::LengthStringConstant(), length,
+ SmiConstant(LanguageMode::kStrict));
+ Goto(&done);
+ }
+
+ BIND(&done);
+ }
+};
+
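The GenerateSetLength helper added above only writes the length directly when the array still has fast elements, its length is writable, and the new length already equals the old one; every other case goes through Runtime::kSetProperty, which inserts holes or shrinks the backing store as needed. As a point of reference, here is a minimal standalone C++ sketch of that fast/slow split — ModelArray and SetLengthGeneric are illustrative stand-ins, not V8 types:

    // Standalone model of the GenerateSetLength fast/slow split (not V8 code).
    #include <cstddef>
    #include <vector>

    struct ModelArray {
      std::vector<double> elements;
      bool fast_elements = true;
      bool length_writable = true;
    };

    // Generic path: may insert holes (grow) or shrink the backing store.
    void SetLengthGeneric(ModelArray& array, std::size_t length) {
      array.elements.resize(length);
    }

    void SetLengthModel(ModelArray& array, std::size_t length) {
      // Fast path only if the array has fast elements, its length is writable,
      // and the new length equals the current one; then nothing needs to be
      // resized and the stored length can simply be (re)written.
      if (array.fast_elements && array.length_writable &&
          array.elements.size() == length) {
        return;
      }
      SetLengthGeneric(array, length);
    }
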
+// ES #sec-array.from
+TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) {
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
+
+ CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
+
+ TNode<Object> map_function = args.GetOptionalArgumentValue(1);
+
+ // If map_function is not undefined, then ensure it's callable else throw.
+ {
+ Label no_error(this), error(this);
+ GotoIf(IsUndefined(map_function), &no_error);
+ GotoIf(TaggedIsSmi(map_function), &error);
+ Branch(IsCallable(map_function), &no_error, &error);
+
+ BIND(&error);
+ ThrowTypeError(context, MessageTemplate::kCalledNonCallable, map_function);
+
+ BIND(&no_error);
+ }
+
+ Label iterable(this), not_iterable(this), finished(this), if_exception(this);
+
+ TNode<Object> this_arg = args.GetOptionalArgumentValue(2);
+ TNode<Object> items = args.GetOptionalArgumentValue(0);
+ // The spec doesn't require ToObject to be called directly on the iterable
+ // branch, but it's part of GetMethod that is in the spec.
+ TNode<JSReceiver> array_like = ToObject(context, items);
+
+ TVARIABLE(Object, array);
+ TVARIABLE(Number, length);
+
+ // Determine whether items[Symbol.iterator] is defined:
+ IteratorBuiltinsAssembler iterator_assembler(state());
+ Node* iterator_method =
+ iterator_assembler.GetIteratorMethod(context, array_like);
+ Branch(IsNullOrUndefined(iterator_method), &not_iterable, &iterable);
+
+ BIND(&iterable);
+ {
+ TVARIABLE(Number, index, SmiConstant(0));
+ TVARIABLE(Object, var_exception);
+ Label loop(this, &index), loop_done(this),
+ on_exception(this, Label::kDeferred),
+ index_overflow(this, Label::kDeferred);
+
+ // Check that the method is callable.
+ {
+ Label get_method_not_callable(this, Label::kDeferred), next(this);
+ GotoIf(TaggedIsSmi(iterator_method), &get_method_not_callable);
+ GotoIfNot(IsCallable(iterator_method), &get_method_not_callable);
+ Goto(&next);
+
+ BIND(&get_method_not_callable);
+ ThrowTypeError(context, MessageTemplate::kCalledNonCallable,
+ iterator_method);
+
+ BIND(&next);
+ }
+
+ // Construct the output array with empty length.
+ array = ConstructArrayLike(context, args.GetReceiver());
+
+ // Actually get the iterator and throw if the iterator method does not yield
+ // one.
+ IteratorRecord iterator_record =
+ iterator_assembler.GetIterator(context, items, iterator_method);
+
+ TNode<Context> native_context = LoadNativeContext(context);
+ TNode<Object> fast_iterator_result_map =
+ LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+
+ Goto(&loop);
+
+ BIND(&loop);
+ {
+ // Loop while iterator is not done.
+ TNode<Object> next = CAST(iterator_assembler.IteratorStep(
+ context, iterator_record, &loop_done, fast_iterator_result_map));
+ TVARIABLE(Object, value,
+ CAST(iterator_assembler.IteratorValue(
+ context, next, fast_iterator_result_map)));
+
+ // If a map_function is supplied then call it (using this_arg as
+ // receiver), on the value returned from the iterator. Exceptions are
+ // caught so the iterator can be closed.
+ {
+ Label next(this);
+ GotoIf(IsUndefined(map_function), &next);
+
+ CSA_ASSERT(this, IsCallable(map_function));
+ Node* v = CallJS(CodeFactory::Call(isolate()), context, map_function,
+ this_arg, value.value(), index.value());
+ GotoIfException(v, &on_exception, &var_exception);
+ value = CAST(v);
+ Goto(&next);
+ BIND(&next);
+ }
+
+ // Store the result in the output object (catching any exceptions so the
+ // iterator can be closed).
+ Node* define_status =
+ CallRuntime(Runtime::kCreateDataProperty, context, array.value(),
+ index.value(), value.value());
+ GotoIfException(define_status, &on_exception, &var_exception);
+
+ index = NumberInc(index.value());
+
+ // The spec requires that we throw an exception if index reaches 2^53-1,
+ // but an empty loop would take >100 days to do this many iterations. To
+ // actually run for that long would require an iterator that never set
+ // done to true and a target array which somehow never ran out of memory,
+ // e.g. a proxy that discarded the values. Ignoring this case just means
+ // we would repeatedly call CreateDataProperty with index = 2^53.
+ CSA_ASSERT_BRANCH(this, [&](Label* ok, Label* not_ok) {
+ BranchIfNumberRelationalComparison(Operation::kLessThan, index.value(),
+ NumberConstant(kMaxSafeInteger), ok,
+ not_ok);
+ });
+ Goto(&loop);
+ }
+
+ BIND(&loop_done);
+ {
+ length = index;
+ Goto(&finished);
+ }
+
+ BIND(&on_exception);
+ {
+ // Close the iterator, rethrowing either the passed exception or
+ // exceptions thrown during the close.
+ iterator_assembler.IteratorCloseOnException(context, iterator_record,
+ &var_exception);
+ }
+ }
+
+ // Since there's no iterator, items cannot be a Fast JS Array.
+ BIND(&not_iterable);
+ {
+ CSA_ASSERT(this, Word32BinaryNot(IsFastJSArray(array_like, context)));
+
+ // Treat array_like as an array and try to get its length.
+ length = CAST(ToLength_Inline(
+ context, GetProperty(context, array_like, factory()->length_string())));
+
+ // Construct an array using the receiver as constructor with the same length
+ // as the input array.
+ array = ConstructArrayLike(context, args.GetReceiver(), length.value());
+
+ TVARIABLE(Number, index, SmiConstant(0));
+
+ GotoIf(SmiEqual(length.value(), SmiConstant(0)), &finished);
+
+ // Loop from 0 to length-1.
+ {
+ Label loop(this, &index);
+ Goto(&loop);
+ BIND(&loop);
+ TVARIABLE(Object, value);
+
+ value = GetProperty(context, array_like, index.value());
+
+ // If a map_function is supplied then call it (using this_arg as
+ // receiver), on the value retrieved from the array.
+ {
+ Label next(this);
+ GotoIf(IsUndefined(map_function), &next);
+
+ CSA_ASSERT(this, IsCallable(map_function));
+ value = CAST(CallJS(CodeFactory::Call(isolate()), context, map_function,
+ this_arg, value.value(), index.value()));
+ Goto(&next);
+ BIND(&next);
+ }
+
+ // Store the result in the output object.
+ CallRuntime(Runtime::kCreateDataProperty, context, array.value(),
+ index.value(), value.value());
+ index = NumberInc(index.value());
+ BranchIfNumberRelationalComparison(Operation::kLessThan, index.value(),
+ length.value(), &loop, &finished);
+ }
+ }
+
+ BIND(&finished);
+
+ // Finally set the length on the output and return it.
+ GenerateSetLength(context, array.value(), length.value());
+ args.PopAndReturn(array.value());
+}
+
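The ArrayFrom builtin above follows the spec's two paths: when the source has an iterator method, the iterator is drained and each value (optionally passed through the map function with this_arg) is stored at the running index; otherwise the source is treated as array-like, its "length" is read, and elements 0..length-1 are fetched by index. In both cases the result's length is set at the end. A standalone C++ sketch of that control flow, with stand-in types that are not part of V8:

    // Standalone sketch of the two Array.from paths (not V8 code).
    #include <cstddef>
    #include <functional>
    #include <optional>
    #include <vector>

    using Value = double;
    using MapFn = std::function<Value(Value, std::size_t)>;

    struct ArrayLikeSource {
      // Present when the source defines an iterator method.
      std::optional<std::vector<Value>> iterable;
      // Array-like fallback: a "length" plus an indexed getter.
      std::size_t length = 0;
      std::function<Value(std::size_t)> get;
    };

    std::vector<Value> ArrayFromModel(const ArrayLikeSource& items,
                                      const MapFn* mapfn = nullptr) {
      std::vector<Value> a;
      if (items.iterable) {
        // Iterable path: drain the iterator; the result length is however
        // many values the iterator produced.
        std::size_t k = 0;
        for (Value v : *items.iterable) {
          a.push_back(mapfn ? (*mapfn)(v, k) : v);
          ++k;
        }
      } else {
        // Array-like path: read "length" first, then Get(k) for k in
        // [0, length).
        for (std::size_t k = 0; k < items.length; ++k) {
          Value v = items.get(k);
          a.push_back(mapfn ? (*mapfn)(v, k) : v);
        }
      }
      return a;  // the builtin sets "length" explicitly at this point
    }
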
+// ES #sec-array.of
+TF_BUILTIN(ArrayOf, ArrayPopulatorAssembler) {
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
+ TNode<Smi> length = SmiFromInt32(argc);
+
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+
+ CodeStubArguments args(this, length, nullptr, ParameterMode::SMI_PARAMETERS);
+
+ TNode<Object> array = ConstructArrayLike(context, args.GetReceiver(), length);
+
+ // TODO(delphick): Avoid using CreateDataProperty on the fast path.
+ BuildFastLoop(SmiConstant(0), length,
+ [=](Node* index) {
+ CallRuntime(
+ Runtime::kCreateDataProperty, context,
+ static_cast<Node*>(array), index,
+ args.AtIndex(index, ParameterMode::SMI_PARAMETERS));
+ },
+ 1, ParameterMode::SMI_PARAMETERS, IndexAdvanceMode::kPost);
+
+ GenerateSetLength(context, array, length);
+ args.PopAndReturn(array);
+}
+
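ArrayOf above constructs the result (via the receiver when it is a constructor), copies its argc arguments into it with CreateDataProperty, and finally sets the length. A minimal standalone sketch of that flow, again with illustrative types only:

    // Standalone sketch of the Array.of flow (not V8 code).
    #include <cstddef>
    #include <vector>

    std::vector<double> ArrayOfModel(const double* args, std::size_t argc) {
      std::vector<double> a;
      a.reserve(argc);
      for (std::size_t i = 0; i < argc; ++i) {
        // CreateDataProperty(A, i, args[i]) in the spec; a plain append here.
        a.push_back(args[i]);
      }
      // Set(A, "length", argc) is implicit for std::vector.
      return a;
    }
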
// ES #sec-get-%typedarray%.prototype.find
-TF_BUILTIN(TypedArrayPrototypeFind, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(TypedArrayPrototypeFind, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -1856,19 +2225,19 @@ TF_BUILTIN(TypedArrayPrototypeFind, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingTypedArrayBuiltinBody(
"%TypedArray%.prototype.find",
- &ArrayBuiltinCodeStubAssembler::FindResultGenerator,
- &ArrayBuiltinCodeStubAssembler::FindProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+ &ArrayBuiltinsAssembler::FindResultGenerator,
+ &ArrayBuiltinsAssembler::FindProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction);
}
// ES #sec-get-%typedarray%.prototype.findIndex
-TF_BUILTIN(TypedArrayPrototypeFindIndex, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(TypedArrayPrototypeFindIndex, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -1877,14 +2246,14 @@ TF_BUILTIN(TypedArrayPrototypeFindIndex, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingTypedArrayBuiltinBody(
"%TypedArray%.prototype.findIndex",
- &ArrayBuiltinCodeStubAssembler::FindIndexResultGenerator,
- &ArrayBuiltinCodeStubAssembler::FindIndexProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+ &ArrayBuiltinsAssembler::FindIndexResultGenerator,
+ &ArrayBuiltinsAssembler::FindIndexProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction);
}
-TF_BUILTIN(ArrayForEachLoopContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayForEachLoopContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -1898,15 +2267,13 @@ TF_BUILTIN(ArrayForEachLoopContinuation, ArrayBuiltinCodeStubAssembler) {
len, to);
GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinCodeStubAssembler::ForEachProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- MissingPropertyMode::kSkip);
+ &ArrayBuiltinsAssembler::ForEachProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction, MissingPropertyMode::kSkip);
}
-TF_BUILTIN(ArrayForEachLoopEagerDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayForEachLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -1917,10 +2284,9 @@ TF_BUILTIN(ArrayForEachLoopEagerDeoptContinuation,
initial_k, len, UndefinedConstant()));
}
-TF_BUILTIN(ArrayForEachLoopLazyDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayForEachLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -1931,13 +2297,13 @@ TF_BUILTIN(ArrayForEachLoopLazyDeoptContinuation,
initial_k, len, UndefinedConstant()));
}
-TF_BUILTIN(ArrayForEach, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(ArrayForEach, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -1946,20 +2312,20 @@ TF_BUILTIN(ArrayForEach, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingArrayBuiltinBody(
"Array.prototype.forEach",
- &ArrayBuiltinCodeStubAssembler::ForEachResultGenerator,
- &ArrayBuiltinCodeStubAssembler::ForEachProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ &ArrayBuiltinsAssembler::ForEachResultGenerator,
+ &ArrayBuiltinsAssembler::ForEachProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction,
Builtins::CallableFor(isolate(), Builtins::kArrayForEachLoopContinuation),
MissingPropertyMode::kSkip);
}
-TF_BUILTIN(TypedArrayPrototypeForEach, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(TypedArrayPrototypeForEach, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -1968,14 +2334,14 @@ TF_BUILTIN(TypedArrayPrototypeForEach, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingTypedArrayBuiltinBody(
"%TypedArray%.prototype.forEach",
- &ArrayBuiltinCodeStubAssembler::ForEachResultGenerator,
- &ArrayBuiltinCodeStubAssembler::ForEachProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+ &ArrayBuiltinsAssembler::ForEachResultGenerator,
+ &ArrayBuiltinsAssembler::ForEachProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction);
}
-TF_BUILTIN(ArraySomeLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArraySomeLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -2002,9 +2368,9 @@ TF_BUILTIN(ArraySomeLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
}
}
-TF_BUILTIN(ArraySomeLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArraySomeLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -2015,9 +2381,9 @@ TF_BUILTIN(ArraySomeLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
len, UndefinedConstant()));
}
-TF_BUILTIN(ArraySomeLoopContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArraySomeLoopContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -2031,18 +2397,17 @@ TF_BUILTIN(ArraySomeLoopContinuation, ArrayBuiltinCodeStubAssembler) {
len, to);
GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinCodeStubAssembler::SomeProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- MissingPropertyMode::kSkip);
+ &ArrayBuiltinsAssembler::SomeProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction, MissingPropertyMode::kSkip);
}
-TF_BUILTIN(ArraySome, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(ArraySome, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2050,21 +2415,20 @@ TF_BUILTIN(ArraySome, ArrayBuiltinCodeStubAssembler) {
new_target, argc);
GenerateIteratingArrayBuiltinBody(
- "Array.prototype.some",
- &ArrayBuiltinCodeStubAssembler::SomeResultGenerator,
- &ArrayBuiltinCodeStubAssembler::SomeProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ "Array.prototype.some", &ArrayBuiltinsAssembler::SomeResultGenerator,
+ &ArrayBuiltinsAssembler::SomeProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction,
Builtins::CallableFor(isolate(), Builtins::kArraySomeLoopContinuation),
MissingPropertyMode::kSkip);
}
-TF_BUILTIN(TypedArrayPrototypeSome, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(TypedArrayPrototypeSome, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2073,14 +2437,14 @@ TF_BUILTIN(TypedArrayPrototypeSome, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingTypedArrayBuiltinBody(
"%TypedArray%.prototype.some",
- &ArrayBuiltinCodeStubAssembler::SomeResultGenerator,
- &ArrayBuiltinCodeStubAssembler::SomeProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+ &ArrayBuiltinsAssembler::SomeResultGenerator,
+ &ArrayBuiltinsAssembler::SomeProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction);
}
-TF_BUILTIN(ArrayEveryLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayEveryLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -2107,10 +2471,9 @@ TF_BUILTIN(ArrayEveryLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
{ Return(FalseConstant()); }
}
-TF_BUILTIN(ArrayEveryLoopEagerDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayEveryLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -2121,9 +2484,9 @@ TF_BUILTIN(ArrayEveryLoopEagerDeoptContinuation,
len, UndefinedConstant()));
}
-TF_BUILTIN(ArrayEveryLoopContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayEveryLoopContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -2137,18 +2500,17 @@ TF_BUILTIN(ArrayEveryLoopContinuation, ArrayBuiltinCodeStubAssembler) {
len, to);
GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinCodeStubAssembler::EveryProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- MissingPropertyMode::kSkip);
+ &ArrayBuiltinsAssembler::EveryProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction, MissingPropertyMode::kSkip);
}
-TF_BUILTIN(ArrayEvery, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(ArrayEvery, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2156,21 +2518,20 @@ TF_BUILTIN(ArrayEvery, ArrayBuiltinCodeStubAssembler) {
new_target, argc);
GenerateIteratingArrayBuiltinBody(
- "Array.prototype.every",
- &ArrayBuiltinCodeStubAssembler::EveryResultGenerator,
- &ArrayBuiltinCodeStubAssembler::EveryProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ "Array.prototype.every", &ArrayBuiltinsAssembler::EveryResultGenerator,
+ &ArrayBuiltinsAssembler::EveryProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction,
Builtins::CallableFor(isolate(), Builtins::kArrayEveryLoopContinuation),
MissingPropertyMode::kSkip);
}
-TF_BUILTIN(TypedArrayPrototypeEvery, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(TypedArrayPrototypeEvery, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2179,14 +2540,14 @@ TF_BUILTIN(TypedArrayPrototypeEvery, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingTypedArrayBuiltinBody(
"%TypedArray%.prototype.every",
- &ArrayBuiltinCodeStubAssembler::EveryResultGenerator,
- &ArrayBuiltinCodeStubAssembler::EveryProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+ &ArrayBuiltinsAssembler::EveryResultGenerator,
+ &ArrayBuiltinsAssembler::EveryProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction);
}
-TF_BUILTIN(ArrayReduceLoopContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayReduceLoopContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* accumulator = Parameter(Descriptor::kAccumulator);
@@ -2200,15 +2561,30 @@ TF_BUILTIN(ArrayReduceLoopContinuation, ArrayBuiltinCodeStubAssembler) {
initial_k, len, to);
GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
- &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
+ &ArrayBuiltinsAssembler::ReduceProcessor,
+ &ArrayBuiltinsAssembler::ReducePostLoopAction,
MissingPropertyMode::kSkip);
}
-TF_BUILTIN(ArrayReduceLoopEagerDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayReducePreLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* len = Parameter(Descriptor::kLength);
+
+ Callable stub(
+ Builtins::CallableFor(isolate(), Builtins::kArrayReduceLoopContinuation));
+ // Simulate starting the loop at 0, while ensuring that the accumulator is
+ // the hole. The continuation stub will search for the initial non-hole
+ // element, rightly throwing an exception if none is found.
+ Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(),
+ TheHoleConstant(), receiver, SmiConstant(0), len,
+ UndefinedConstant()));
+}
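This new pre-loop continuation is taken when an inlined reduce() without an initial value deopts before the accumulator search has finished: it forwards a hole accumulator so the loop continuation performs the search itself. The user-visible semantics, as a small TypeScript sketch (illustrative, not part of the diff); the same pre-loop continuation is added for reduceRight further down:

  [3, 4, 5].reduce((acc, x) => acc + x);         // 12 - the first element seeds the accumulator
  ([] as number[]).reduce((acc, x) => acc + x);  // throws TypeError: no elements and no initial value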
+
+TF_BUILTIN(ArrayReduceLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* accumulator = Parameter(Descriptor::kAccumulator);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -2220,10 +2596,9 @@ TF_BUILTIN(ArrayReduceLoopEagerDeoptContinuation,
accumulator, receiver, initial_k, len, UndefinedConstant()));
}
-TF_BUILTIN(ArrayReduceLoopLazyDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayReduceLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* initial_k = Parameter(Descriptor::kInitialK);
Node* len = Parameter(Descriptor::kLength);
@@ -2235,13 +2610,13 @@ TF_BUILTIN(ArrayReduceLoopLazyDeoptContinuation,
result, receiver, initial_k, len, UndefinedConstant()));
}
-TF_BUILTIN(ArrayReduce, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(ArrayReduce, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
@@ -2249,21 +2624,20 @@ TF_BUILTIN(ArrayReduce, ArrayBuiltinCodeStubAssembler) {
new_target, argc);
GenerateIteratingArrayBuiltinBody(
- "Array.prototype.reduce",
- &ArrayBuiltinCodeStubAssembler::ReduceResultGenerator,
- &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
- &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
+ "Array.prototype.reduce", &ArrayBuiltinsAssembler::ReduceResultGenerator,
+ &ArrayBuiltinsAssembler::ReduceProcessor,
+ &ArrayBuiltinsAssembler::ReducePostLoopAction,
Builtins::CallableFor(isolate(), Builtins::kArrayReduceLoopContinuation),
MissingPropertyMode::kSkip);
}
-TF_BUILTIN(TypedArrayPrototypeReduce, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(TypedArrayPrototypeReduce, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
@@ -2272,14 +2646,14 @@ TF_BUILTIN(TypedArrayPrototypeReduce, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingTypedArrayBuiltinBody(
"%TypedArray%.prototype.reduce",
- &ArrayBuiltinCodeStubAssembler::ReduceResultGenerator,
- &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
- &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction);
+ &ArrayBuiltinsAssembler::ReduceResultGenerator,
+ &ArrayBuiltinsAssembler::ReduceProcessor,
+ &ArrayBuiltinsAssembler::ReducePostLoopAction);
}
-TF_BUILTIN(ArrayReduceRightLoopContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayReduceRightLoopContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* accumulator = Parameter(Descriptor::kAccumulator);
@@ -2293,15 +2667,31 @@ TF_BUILTIN(ArrayReduceRightLoopContinuation, ArrayBuiltinCodeStubAssembler) {
initial_k, len, to);
GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
- &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
- MissingPropertyMode::kSkip, ForEachDirection::kReverse);
+ &ArrayBuiltinsAssembler::ReduceProcessor,
+ &ArrayBuiltinsAssembler::ReducePostLoopAction, MissingPropertyMode::kSkip,
+ ForEachDirection::kReverse);
}
-TF_BUILTIN(ArrayReduceRightLoopEagerDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayReduceRightPreLoopEagerDeoptContinuation,
+ ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* len = Parameter(Descriptor::kLength);
+
+ Callable stub(Builtins::CallableFor(
+ isolate(), Builtins::kArrayReduceRightLoopContinuation));
+ // Simulate starting the loop at 0, while ensuring that the accumulator is
+ // the hole. The continuation stub will search for the initial non-hole
+ // element, rightly throwing an exception if none is found.
+ Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(),
+ TheHoleConstant(), receiver, SmiConstant(0), len,
+ UndefinedConstant()));
+}
+
+TF_BUILTIN(ArrayReduceRightLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* accumulator = Parameter(Descriptor::kAccumulator);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -2313,10 +2703,9 @@ TF_BUILTIN(ArrayReduceRightLoopEagerDeoptContinuation,
accumulator, receiver, initial_k, len, UndefinedConstant()));
}
-TF_BUILTIN(ArrayReduceRightLoopLazyDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayReduceRightLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* initial_k = Parameter(Descriptor::kInitialK);
Node* len = Parameter(Descriptor::kLength);
@@ -2328,13 +2717,13 @@ TF_BUILTIN(ArrayReduceRightLoopLazyDeoptContinuation,
result, receiver, initial_k, len, UndefinedConstant()));
}
-TF_BUILTIN(ArrayReduceRight, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(ArrayReduceRight, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
@@ -2343,21 +2732,21 @@ TF_BUILTIN(ArrayReduceRight, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingArrayBuiltinBody(
"Array.prototype.reduceRight",
- &ArrayBuiltinCodeStubAssembler::ReduceResultGenerator,
- &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
- &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
+ &ArrayBuiltinsAssembler::ReduceResultGenerator,
+ &ArrayBuiltinsAssembler::ReduceProcessor,
+ &ArrayBuiltinsAssembler::ReducePostLoopAction,
Builtins::CallableFor(isolate(),
Builtins::kArrayReduceRightLoopContinuation),
MissingPropertyMode::kSkip, ForEachDirection::kReverse);
}
-TF_BUILTIN(TypedArrayPrototypeReduceRight, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(TypedArrayPrototypeReduceRight, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
@@ -2366,15 +2755,15 @@ TF_BUILTIN(TypedArrayPrototypeReduceRight, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingTypedArrayBuiltinBody(
"%TypedArray%.prototype.reduceRight",
- &ArrayBuiltinCodeStubAssembler::ReduceResultGenerator,
- &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
- &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
+ &ArrayBuiltinsAssembler::ReduceResultGenerator,
+ &ArrayBuiltinsAssembler::ReduceProcessor,
+ &ArrayBuiltinsAssembler::ReducePostLoopAction,
ForEachDirection::kReverse);
}
-TF_BUILTIN(ArrayFilterLoopContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayFilterLoopContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -2388,15 +2777,13 @@ TF_BUILTIN(ArrayFilterLoopContinuation, ArrayBuiltinCodeStubAssembler) {
len, to);
GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinCodeStubAssembler::FilterProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- MissingPropertyMode::kSkip);
+ &ArrayBuiltinsAssembler::FilterProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction, MissingPropertyMode::kSkip);
}
-TF_BUILTIN(ArrayFilterLoopEagerDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayFilterLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -2409,10 +2796,9 @@ TF_BUILTIN(ArrayFilterLoopEagerDeoptContinuation,
to));
}
-TF_BUILTIN(ArrayFilterLoopLazyDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayFilterLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -2451,13 +2837,13 @@ TF_BUILTIN(ArrayFilterLoopLazyDeoptContinuation,
to.value()));
}
-TF_BUILTIN(ArrayFilter, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(ArrayFilter, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2465,17 +2851,16 @@ TF_BUILTIN(ArrayFilter, ArrayBuiltinCodeStubAssembler) {
new_target, argc);
GenerateIteratingArrayBuiltinBody(
- "Array.prototype.filter",
- &ArrayBuiltinCodeStubAssembler::FilterResultGenerator,
- &ArrayBuiltinCodeStubAssembler::FilterProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ "Array.prototype.filter", &ArrayBuiltinsAssembler::FilterResultGenerator,
+ &ArrayBuiltinsAssembler::FilterProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction,
Builtins::CallableFor(isolate(), Builtins::kArrayFilterLoopContinuation),
MissingPropertyMode::kSkip);
}
-TF_BUILTIN(ArrayMapLoopContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayMapLoopContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -2489,14 +2874,13 @@ TF_BUILTIN(ArrayMapLoopContinuation, ArrayBuiltinCodeStubAssembler) {
len, to);
GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinCodeStubAssembler::SpecCompliantMapProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- MissingPropertyMode::kSkip);
+ &ArrayBuiltinsAssembler::SpecCompliantMapProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction, MissingPropertyMode::kSkip);
}
-TF_BUILTIN(ArrayMapLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayMapLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -2508,9 +2892,9 @@ TF_BUILTIN(ArrayMapLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
UndefinedConstant()));
}
-TF_BUILTIN(ArrayMapLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayMapLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -2533,13 +2917,13 @@ TF_BUILTIN(ArrayMapLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
UndefinedConstant()));
}
-TF_BUILTIN(ArrayMap, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(ArrayMap, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2547,20 +2931,20 @@ TF_BUILTIN(ArrayMap, ArrayBuiltinCodeStubAssembler) {
new_target, argc);
GenerateIteratingArrayBuiltinBody(
- "Array.prototype.map", &ArrayBuiltinCodeStubAssembler::MapResultGenerator,
- &ArrayBuiltinCodeStubAssembler::FastMapProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ "Array.prototype.map", &ArrayBuiltinsAssembler::MapResultGenerator,
+ &ArrayBuiltinsAssembler::FastMapProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction,
Builtins::CallableFor(isolate(), Builtins::kArrayMapLoopContinuation),
MissingPropertyMode::kSkip);
}
-TF_BUILTIN(TypedArrayPrototypeMap, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(TypedArrayPrototypeMap, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2569,9 +2953,9 @@ TF_BUILTIN(TypedArrayPrototypeMap, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingTypedArrayBuiltinBody(
"%TypedArray%.prototype.map",
- &ArrayBuiltinCodeStubAssembler::TypedArrayMapResultGenerator,
- &ArrayBuiltinCodeStubAssembler::TypedArrayMapProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+ &ArrayBuiltinsAssembler::TypedArrayMapResultGenerator,
+ &ArrayBuiltinsAssembler::TypedArrayMapProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction);
}
TF_BUILTIN(ArrayIsArray, CodeStubAssembler) {
@@ -2620,7 +3004,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
TNode<Object> receiver = args.GetReceiver();
TNode<Object> search_element =
args.GetOptionalArgumentValue(kSearchElementArg);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* intptr_zero = IntPtrConstant(0);
@@ -2999,7 +3383,8 @@ class ArrayPrototypeIterationAssembler : public CodeStubAssembler {
: CodeStubAssembler(state) {}
protected:
- void Generate_ArrayPrototypeIterationMethod(Node* context, Node* receiver,
+ void Generate_ArrayPrototypeIterationMethod(TNode<Context> context,
+ TNode<Object> receiver,
IterationKind iteration_kind) {
VARIABLE(var_array, MachineRepresentation::kTagged);
VARIABLE(var_map, MachineRepresentation::kTagged);
@@ -3009,15 +3394,17 @@ class ArrayPrototypeIterationAssembler : public CodeStubAssembler {
Label create_array_iterator(this);
GotoIf(TaggedIsSmi(receiver), &if_isnotobject);
- var_array.Bind(receiver);
- var_map.Bind(LoadMap(receiver));
+
+ TNode<HeapObject> object_receiver = CAST(receiver);
+ var_array.Bind(object_receiver);
+ var_map.Bind(LoadMap(object_receiver));
var_type.Bind(LoadMapInstanceType(var_map.value()));
Branch(IsJSReceiverInstanceType(var_type.value()), &create_array_iterator,
&if_isnotobject);
BIND(&if_isnotobject);
{
- Node* result = CallBuiltin(Builtins::kToObject, context, receiver);
+ TNode<JSReceiver> result = ToObject(context, receiver);
var_array.Bind(result);
var_map.Bind(LoadMap(result));
var_type.Bind(LoadMapInstanceType(var_map.value()));
@@ -3031,31 +3418,30 @@ class ArrayPrototypeIterationAssembler : public CodeStubAssembler {
};
TF_BUILTIN(ArrayPrototypeValues, ArrayPrototypeIterationAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Generate_ArrayPrototypeIterationMethod(context, receiver,
IterationKind::kValues);
}
TF_BUILTIN(ArrayPrototypeEntries, ArrayPrototypeIterationAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Generate_ArrayPrototypeIterationMethod(context, receiver,
IterationKind::kEntries);
}
TF_BUILTIN(ArrayPrototypeKeys, ArrayPrototypeIterationAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Generate_ArrayPrototypeIterationMethod(context, receiver,
IterationKind::kKeys);
}
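The three builtins above differ only in the IterationKind they pass to the shared Generate_ArrayPrototypeIterationMethod helper. A TypeScript sketch of the user-visible results (not part of this change):

  [...["a", "b"].keys()];     // [0, 1]
  [...["a", "b"].values()];   // ["a", "b"]
  [...["a", "b"].entries()];  // [[0, "a"], [1, "b"]]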
TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
- Handle<String> operation = factory()->NewStringFromAsciiChecked(
- "Array Iterator.prototype.next", TENURED);
+ const char* method_name = "Array Iterator.prototype.next";
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* iterator = Parameter(Descriptor::kReceiver);
VARIABLE(var_value, MachineRepresentation::kTagged);
@@ -3300,6 +3686,8 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_BIGUINT64_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_BIGINT64_ARRAY_KEY_VALUE_ITERATOR_TYPE,
JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE,
JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE,
JS_INT8_ARRAY_VALUE_ITERATOR_TYPE,
@@ -3309,19 +3697,23 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
JS_INT32_ARRAY_VALUE_ITERATOR_TYPE,
JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE,
JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_BIGUINT64_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_BIGINT64_ARRAY_VALUE_ITERATOR_TYPE,
};
Label uint8_values(this), int8_values(this), uint16_values(this),
int16_values(this), uint32_values(this), int32_values(this),
- float32_values(this), float64_values(this);
+ float32_values(this), float64_values(this), biguint64_values(this),
+ bigint64_values(this);
Label* kInstanceTypeHandlers[] = {
- &allocate_key_result, &uint8_values, &uint8_values,
- &int8_values, &uint16_values, &int16_values,
- &uint32_values, &int32_values, &float32_values,
- &float64_values, &uint8_values, &uint8_values,
- &int8_values, &uint16_values, &int16_values,
- &uint32_values, &int32_values, &float32_values,
- &float64_values,
+ &allocate_key_result, &uint8_values, &uint8_values,
+ &int8_values, &uint16_values, &int16_values,
+ &uint32_values, &int32_values, &float32_values,
+ &float64_values, &biguint64_values, &bigint64_values,
+ &uint8_values, &uint8_values, &int8_values,
+ &uint16_values, &int16_values, &uint32_values,
+ &int32_values, &float32_values, &float64_values,
+ &biguint64_values, &bigint64_values,
};
var_done.Bind(FalseConstant());
@@ -3330,59 +3722,62 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
BIND(&uint8_values);
{
- Node* value_uint8 = LoadFixedTypedArrayElement(
- data_ptr, index, UINT8_ELEMENTS, SMI_PARAMETERS);
- var_value.Bind(SmiFromWord32(value_uint8));
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, UINT8_ELEMENTS, SMI_PARAMETERS));
Goto(&allocate_entry_if_needed);
}
BIND(&int8_values);
{
- Node* value_int8 = LoadFixedTypedArrayElement(
- data_ptr, index, INT8_ELEMENTS, SMI_PARAMETERS);
- var_value.Bind(SmiFromWord32(value_int8));
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, INT8_ELEMENTS, SMI_PARAMETERS));
Goto(&allocate_entry_if_needed);
}
BIND(&uint16_values);
{
- Node* value_uint16 = LoadFixedTypedArrayElement(
- data_ptr, index, UINT16_ELEMENTS, SMI_PARAMETERS);
- var_value.Bind(SmiFromWord32(value_uint16));
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, UINT16_ELEMENTS, SMI_PARAMETERS));
Goto(&allocate_entry_if_needed);
}
BIND(&int16_values);
{
- Node* value_int16 = LoadFixedTypedArrayElement(
- data_ptr, index, INT16_ELEMENTS, SMI_PARAMETERS);
- var_value.Bind(SmiFromWord32(value_int16));
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, INT16_ELEMENTS, SMI_PARAMETERS));
Goto(&allocate_entry_if_needed);
}
BIND(&uint32_values);
{
- Node* value_uint32 = LoadFixedTypedArrayElement(
- data_ptr, index, UINT32_ELEMENTS, SMI_PARAMETERS);
- var_value.Bind(ChangeUint32ToTagged(value_uint32));
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, UINT32_ELEMENTS, SMI_PARAMETERS));
Goto(&allocate_entry_if_needed);
}
BIND(&int32_values);
{
- Node* value_int32 = LoadFixedTypedArrayElement(
- data_ptr, index, INT32_ELEMENTS, SMI_PARAMETERS);
- var_value.Bind(ChangeInt32ToTagged(value_int32));
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, INT32_ELEMENTS, SMI_PARAMETERS));
Goto(&allocate_entry_if_needed);
}
BIND(&float32_values);
{
- Node* value_float32 = LoadFixedTypedArrayElement(
- data_ptr, index, FLOAT32_ELEMENTS, SMI_PARAMETERS);
- var_value.Bind(
- AllocateHeapNumberWithValue(ChangeFloat32ToFloat64(value_float32)));
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, FLOAT32_ELEMENTS, SMI_PARAMETERS));
Goto(&allocate_entry_if_needed);
}
BIND(&float64_values);
{
- Node* value_float64 = LoadFixedTypedArrayElement(
- data_ptr, index, FLOAT64_ELEMENTS, SMI_PARAMETERS);
- var_value.Bind(AllocateHeapNumberWithValue(value_float64));
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, FLOAT64_ELEMENTS, SMI_PARAMETERS));
+ Goto(&allocate_entry_if_needed);
+ }
+ BIND(&biguint64_values);
+ {
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, BIGUINT64_ELEMENTS, SMI_PARAMETERS));
+ Goto(&allocate_entry_if_needed);
+ }
+ BIND(&bigint64_values);
+ {
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, BIGINT64_ELEMENTS, SMI_PARAMETERS));
Goto(&allocate_entry_if_needed);
}
}
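LoadFixedTypedArrayElementAsTagged folds the per-kind conversion (Smi, HeapNumber, or BigInt) that was previously spelled out at each call site into one helper, and the two new BigInt-backed element kinds flow through the same path. In terms of what the iterator hands back (TypeScript sketch, assuming an ES2020 target for the BigInt literal):

  for (const v of new Float64Array([1.5])) console.log(typeof v);   // "number"
  for (const v of new BigInt64Array([1n])) console.log(typeof v);   // "bigint"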
@@ -3447,14 +3842,12 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
BIND(&throw_bad_receiver);
{
// The {receiver} is not a valid JSArrayIterator.
- CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
- HeapConstant(operation), iterator);
- Unreachable();
+ ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
+ StringConstant(method_name), iterator);
}
BIND(&if_isdetached);
- ThrowTypeError(context, MessageTemplate::kDetachedOperation,
- HeapConstant(operation));
+ ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-array-gen.h b/deps/v8/src/builtins/builtins-array-gen.h
new file mode 100644
index 0000000000..67ac51480c
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-array-gen.h
@@ -0,0 +1,156 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_ARRAY_GEN_H_
+#define V8_BUILTINS_BUILTINS_ARRAY_GEN_H_
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class ArrayBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit ArrayBuiltinsAssembler(compiler::CodeAssemblerState* state);
+
+ typedef std::function<void(ArrayBuiltinsAssembler* masm)>
+ BuiltinResultGenerator;
+
+ typedef std::function<Node*(ArrayBuiltinsAssembler* masm, Node* k_value,
+ Node* k)>
+ CallResultProcessor;
+
+ typedef std::function<void(ArrayBuiltinsAssembler* masm)> PostLoopAction;
+
+ enum class MissingPropertyMode { kSkip, kUseUndefined };
+
+ void FindResultGenerator();
+
+ Node* FindProcessor(Node* k_value, Node* k);
+
+ void FindIndexResultGenerator();
+
+ Node* FindIndexProcessor(Node* k_value, Node* k);
+
+ void ForEachResultGenerator();
+
+ Node* ForEachProcessor(Node* k_value, Node* k);
+
+ void SomeResultGenerator();
+
+ Node* SomeProcessor(Node* k_value, Node* k);
+
+ void EveryResultGenerator();
+
+ Node* EveryProcessor(Node* k_value, Node* k);
+
+ void ReduceResultGenerator();
+
+ Node* ReduceProcessor(Node* k_value, Node* k);
+
+ void ReducePostLoopAction();
+
+ void FilterResultGenerator();
+
+ Node* FilterProcessor(Node* k_value, Node* k);
+
+ void MapResultGenerator();
+
+ void TypedArrayMapResultGenerator();
+
+ Node* SpecCompliantMapProcessor(Node* k_value, Node* k);
+
+ Node* FastMapProcessor(Node* k_value, Node* k);
+
+ // See tc39.github.io/ecma262/#sec-%typedarray%.prototype.map.
+ Node* TypedArrayMapProcessor(Node* k_value, Node* k);
+
+ void NullPostLoopAction();
+
+ protected:
+ TNode<Context> context() { return context_; }
+ TNode<Object> receiver() { return receiver_; }
+ Node* new_target() { return new_target_; }
+ TNode<IntPtrT> argc() { return argc_; }
+ Node* o() { return o_; }
+ Node* len() { return len_; }
+ Node* callbackfn() { return callbackfn_; }
+ Node* this_arg() { return this_arg_; }
+ Node* k() { return k_.value(); }
+ Node* a() { return a_.value(); }
+
+ void ReturnFromBuiltin(Node* value);
+
+ void InitIteratingArrayBuiltinBody(TNode<Context> context,
+ TNode<Object> receiver, Node* callbackfn,
+ Node* this_arg, Node* new_target,
+ TNode<IntPtrT> argc);
+
+ void GenerateIteratingArrayBuiltinBody(
+ const char* name, const BuiltinResultGenerator& generator,
+ const CallResultProcessor& processor, const PostLoopAction& action,
+ const Callable& slow_case_continuation,
+ MissingPropertyMode missing_property_mode,
+ ForEachDirection direction = ForEachDirection::kForward);
+ void InitIteratingArrayBuiltinLoopContinuation(
+ TNode<Context> context, TNode<Object> receiver, Node* callbackfn,
+ Node* this_arg, Node* a, Node* o, Node* initial_k, Node* len, Node* to);
+
+ void GenerateIteratingTypedArrayBuiltinBody(
+ const char* name, const BuiltinResultGenerator& generator,
+ const CallResultProcessor& processor, const PostLoopAction& action,
+ ForEachDirection direction = ForEachDirection::kForward);
+
+ void GenerateIteratingArrayBuiltinLoopContinuation(
+ const CallResultProcessor& processor, const PostLoopAction& action,
+ MissingPropertyMode missing_property_mode,
+ ForEachDirection direction = ForEachDirection::kForward);
+
+ private:
+ static ElementsKind ElementsKindForInstanceType(InstanceType type);
+
+ void VisitAllTypedArrayElements(Node* array_buffer,
+ const CallResultProcessor& processor,
+ Label* detached, ForEachDirection direction);
+
+ void VisitAllFastElementsOneKind(ElementsKind kind,
+ const CallResultProcessor& processor,
+ Label* array_changed, ParameterMode mode,
+ ForEachDirection direction,
+ MissingPropertyMode missing_property_mode);
+
+ void HandleFastElements(const CallResultProcessor& processor,
+ const PostLoopAction& action, Label* slow,
+ ForEachDirection direction,
+ MissingPropertyMode missing_property_mode);
+
+ // Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
+ // This version is specialized to create a zero-length array
+ // with the elements kind of the input array.
+ void GenerateArraySpeciesCreate();
+
+ // Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
+ void GenerateArraySpeciesCreate(SloppyTNode<Smi> len);
+
+ Node* callbackfn_ = nullptr;
+ Node* o_ = nullptr;
+ Node* this_arg_ = nullptr;
+ Node* len_ = nullptr;
+ TNode<Context> context_;
+ TNode<Object> receiver_;
+ Node* new_target_ = nullptr;
+ TNode<IntPtrT> argc_;
+ Node* fast_typed_array_target_ = nullptr;
+ const char* name_ = nullptr;
+ Variable k_;
+ Variable a_;
+ Variable to_;
+ Label fully_spec_compliant_;
+ ElementsKind source_elements_kind_ = ElementsKind::NO_ELEMENTS;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_BUILTINS_ARRAY_GEN_H_
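The new header makes the three-callback structure of the iterating array builtins explicit: a result generator allocates the accumulator, a processor visits one element, and a post-loop action runs once at the end (for example, the reduce hole check). A loose TypeScript analogue of how GenerateIteratingArrayBuiltinBody composes them; names and types here are illustrative only, not V8 API:

  type ResultGenerator<A> = () => A;                            // FooResultGenerator: allocate "a"
  type Processor<A> = (a: A, kValue: unknown, k: number) => A;  // FooProcessor: visit one element
  type PostLoopAction<A> = (a: A) => A;                         // e.g. ReducePostLoopAction

  function iterateBuiltin<A>(o: unknown[], gen: ResultGenerator<A>,
                             proc: Processor<A>, post: PostLoopAction<A>): A {
    let a = gen();
    for (let k = 0; k < o.length; k++) {
      if (!(k in o)) continue;                                  // MissingPropertyMode::kSkip
      a = proc(a, o[k], k);
    }
    return post(a);
  }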
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index 70ee2326f5..f400e824f0 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -10,6 +10,7 @@
#include "src/contexts.h"
#include "src/counters.h"
#include "src/elements.h"
+#include "src/global-handles.h"
#include "src/isolate.h"
#include "src/lookup.h"
#include "src/objects-inl.h"
@@ -251,7 +252,7 @@ BUILTIN(ArraySlice) {
JSArray* array = JSArray::cast(*receiver);
if (V8_UNLIKELY(!array->HasFastElements() ||
!IsJSArrayFastElementMovingAllowed(isolate, array) ||
- !isolate->IsArraySpeciesLookupChainIntact() ||
+ !isolate->IsSpeciesLookupChainIntact() ||
// If this is a subclass of Array, then call out to JS
!array->HasArrayPrototype(isolate))) {
AllowHeapAllocation allow_allocation;
@@ -316,7 +317,7 @@ BUILTIN(ArraySplice) {
// If this is a subclass of Array, then call out to JS.
!Handle<JSArray>::cast(receiver)->HasArrayPrototype(isolate) ||
// If anything with @@species has been messed with, call out to JS.
- !isolate->IsArraySpeciesLookupChainIntact())) {
+ !isolate->IsSpeciesLookupChainIntact())) {
return CallJsIntrinsic(isolate, isolate->array_splice(), args);
}
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
@@ -1186,7 +1187,7 @@ BUILTIN(ArrayConcat) {
// Avoid a real species read to avoid extra lookups to the array constructor
if (V8_LIKELY(receiver->IsJSArray() &&
Handle<JSArray>::cast(receiver)->HasArrayPrototype(isolate) &&
- isolate->IsArraySpeciesLookupChainIntact())) {
+ isolate->IsSpeciesLookupChainIntact())) {
if (Fast_ArrayConcat(isolate, &args).ToHandle(&result_array)) {
return *result_array;
}
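IsSpeciesLookupChainIntact is the renamed protector check guarding the C++ fast paths of slice, splice, and concat; as the surrounding comments note, an Array subclass or anything that has touched @@species bails out to the JS implementation. The user-visible effect of @@species on these methods, as a TypeScript sketch (illustrative only):

  class MyArray extends Array {
    static get [Symbol.species]() { return Array; }   // redirect derived-object creation
  }
  new MyArray(1, 2, 3).slice(1) instanceof MyArray;    // false - slice consulted @@species and built a plain Array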
diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc
index 0d0e34ee0d..0db53c687e 100644
--- a/deps/v8/src/builtins/builtins-async-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-function-gen.cc
@@ -21,37 +21,18 @@ class AsyncFunctionBuiltinsAssembler : public AsyncBuiltinsAssembler {
Node* const awaited, Node* const outer_promise,
const bool is_predicted_as_caught);
- void AsyncFunctionAwaitResumeClosure(
- Node* const context, Node* const sent_value,
- JSGeneratorObject::ResumeMode resume_mode);
+ void AsyncFunctionAwaitResume(Node* const context, Node* const argument,
+ Node* const generator,
+ JSGeneratorObject::ResumeMode resume_mode);
};
-namespace {
-
-// Describe fields of Context associated with AsyncFunctionAwait resume
-// closures.
-// TODO(jgruber): Refactor to reuse code for upcoming async-generators.
-class AwaitContext {
- public:
- enum Fields { kGeneratorSlot = Context::MIN_CONTEXT_SLOTS, kLength };
-};
-
-} // anonymous namespace
-
-void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitResumeClosure(
- Node* context, Node* sent_value,
+void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitResume(
+ Node* const context, Node* const argument, Node* const generator,
JSGeneratorObject::ResumeMode resume_mode) {
+ CSA_ASSERT(this, IsJSGeneratorObject(generator));
DCHECK(resume_mode == JSGeneratorObject::kNext ||
resume_mode == JSGeneratorObject::kThrow);
- Node* const generator =
- LoadContextElement(context, AwaitContext::kGeneratorSlot);
- CSA_SLOW_ASSERT(this, HasInstanceType(generator, JS_GENERATOR_OBJECT_TYPE));
-
- // Inline version of GeneratorPrototypeNext / GeneratorPrototypeReturn with
- // unnecessary runtime checks removed.
- // TODO(jgruber): Refactor to reuse code from builtins-generator.cc.
-
// Ensure that the generator is neither closed nor running.
CSA_SLOW_ASSERT(
this,
@@ -66,31 +47,23 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitResumeClosure(
// Resume the {receiver} using our trampoline.
Callable callable = CodeFactory::ResumeGenerator(isolate());
- CallStub(callable, context, sent_value, generator);
-
- // The resulting Promise is a throwaway, so it doesn't matter what it
- // resolves to. What is important is that we don't end up keeping the
- // whole chain of intermediate Promises alive by returning the return value
- // of ResumeGenerator, as that would create a memory leak.
+ TailCallStub(callable, context, argument, generator);
}
-TF_BUILTIN(AsyncFunctionAwaitRejectClosure, AsyncFunctionBuiltinsAssembler) {
- CSA_ASSERT_JS_ARGC_EQ(this, 1);
- Node* const sentError = Parameter(Descriptor::kSentError);
+TF_BUILTIN(AsyncFunctionAwaitFulfill, AsyncFunctionBuiltinsAssembler) {
+ Node* const argument = Parameter(Descriptor::kArgument);
+ Node* const generator = Parameter(Descriptor::kGenerator);
Node* const context = Parameter(Descriptor::kContext);
-
- AsyncFunctionAwaitResumeClosure(context, sentError,
- JSGeneratorObject::kThrow);
- Return(UndefinedConstant());
+ AsyncFunctionAwaitResume(context, argument, generator,
+ JSGeneratorObject::kNext);
}
-TF_BUILTIN(AsyncFunctionAwaitResolveClosure, AsyncFunctionBuiltinsAssembler) {
- CSA_ASSERT_JS_ARGC_EQ(this, 1);
- Node* const sentValue = Parameter(Descriptor::kSentValue);
+TF_BUILTIN(AsyncFunctionAwaitReject, AsyncFunctionBuiltinsAssembler) {
+ Node* const argument = Parameter(Descriptor::kArgument);
+ Node* const generator = Parameter(Descriptor::kGenerator);
Node* const context = Parameter(Descriptor::kContext);
-
- AsyncFunctionAwaitResumeClosure(context, sentValue, JSGeneratorObject::kNext);
- Return(UndefinedConstant());
+ AsyncFunctionAwaitResume(context, argument, generator,
+ JSGeneratorObject::kThrow);
}
// ES#abstract-ops-async-function-await
@@ -105,25 +78,12 @@ TF_BUILTIN(AsyncFunctionAwaitResolveClosure, AsyncFunctionBuiltinsAssembler) {
void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
Node* const context, Node* const generator, Node* const awaited,
Node* const outer_promise, const bool is_predicted_as_caught) {
- CSA_SLOW_ASSERT(this, HasInstanceType(generator, JS_GENERATOR_OBJECT_TYPE));
- CSA_SLOW_ASSERT(this, HasInstanceType(outer_promise, JS_PROMISE_TYPE));
-
- ContextInitializer init_closure_context = [&](Node* context) {
- StoreContextElementNoWriteBarrier(context, AwaitContext::kGeneratorSlot,
- generator);
- };
-
- // TODO(jgruber): AsyncBuiltinsAssembler::Await currently does not reuse
- // the awaited promise if it is already a promise. Reuse is non-spec compliant
- // but part of our old behavior gives us a couple of percent
- // performance boost.
- // TODO(jgruber): Use a faster specialized version of
- // InternalPerformPromiseThen.
-
- Await(context, generator, awaited, outer_promise, AwaitContext::kLength,
- init_closure_context, Context::ASYNC_FUNCTION_AWAIT_RESOLVE_SHARED_FUN,
- Context::ASYNC_FUNCTION_AWAIT_REJECT_SHARED_FUN,
- is_predicted_as_caught);
+ CSA_SLOW_ASSERT(this, IsJSGeneratorObject(generator));
+ CSA_SLOW_ASSERT(this, IsJSPromise(outer_promise));
+
+ Await(context, generator, awaited, outer_promise,
+ Builtins::kAsyncFunctionAwaitFulfill,
+ Builtins::kAsyncFunctionAwaitReject, is_predicted_as_caught);
// Return the outer promise to avoid adding a load of the outer promise before
// suspending in BytecodeGenerator.
@@ -133,30 +93,28 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
// Called by the parser from the desugaring of 'await' when catch
// prediction indicates that there is a locally surrounding catch block.
TF_BUILTIN(AsyncFunctionAwaitCaught, AsyncFunctionBuiltinsAssembler) {
- CSA_ASSERT_JS_ARGC_EQ(this, 3);
Node* const generator = Parameter(Descriptor::kGenerator);
- Node* const awaited = Parameter(Descriptor::kAwaited);
+ Node* const value = Parameter(Descriptor::kValue);
Node* const outer_promise = Parameter(Descriptor::kOuterPromise);
Node* const context = Parameter(Descriptor::kContext);
static const bool kIsPredictedAsCaught = true;
- AsyncFunctionAwait(context, generator, awaited, outer_promise,
+ AsyncFunctionAwait(context, generator, value, outer_promise,
kIsPredictedAsCaught);
}
// Called by the parser from the desugaring of 'await' when catch
// prediction indicates no locally surrounding catch block.
TF_BUILTIN(AsyncFunctionAwaitUncaught, AsyncFunctionBuiltinsAssembler) {
- CSA_ASSERT_JS_ARGC_EQ(this, 3);
Node* const generator = Parameter(Descriptor::kGenerator);
- Node* const awaited = Parameter(Descriptor::kAwaited);
+ Node* const value = Parameter(Descriptor::kValue);
Node* const outer_promise = Parameter(Descriptor::kOuterPromise);
Node* const context = Parameter(Descriptor::kContext);
static const bool kIsPredictedAsCaught = false;
- AsyncFunctionAwait(context, generator, awaited, outer_promise,
+ AsyncFunctionAwait(context, generator, value, outer_promise,
kIsPredictedAsCaught);
}
diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc
index 0cdcb57a3f..7958afba00 100644
--- a/deps/v8/src/builtins/builtins-async-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-gen.cc
@@ -13,6 +13,58 @@ namespace internal {
using compiler::Node;
+void AsyncBuiltinsAssembler::Await(Node* context, Node* generator, Node* value,
+ Node* outer_promise,
+ Builtins::Name fulfill_builtin,
+ Builtins::Name reject_builtin,
+ Node* is_predicted_as_caught) {
+ CSA_SLOW_ASSERT(this, Word32Or(IsJSAsyncGeneratorObject(generator),
+ IsJSGeneratorObject(generator)));
+ CSA_SLOW_ASSERT(this, IsJSPromise(outer_promise));
+ CSA_SLOW_ASSERT(this, IsBoolean(is_predicted_as_caught));
+
+ Node* const native_context = LoadNativeContext(context);
+
+ // TODO(bmeurer): This could be optimized and folded into a single allocation.
+ Node* const promise = AllocateAndInitJSPromise(native_context);
+ Node* const promise_reactions =
+ LoadObjectField(promise, JSPromise::kReactionsOrResultOffset);
+ Node* const fulfill_handler =
+ HeapConstant(Builtins::CallableFor(isolate(), fulfill_builtin).code());
+ Node* const reject_handler =
+ HeapConstant(Builtins::CallableFor(isolate(), reject_builtin).code());
+ Node* const reaction = AllocatePromiseReaction(
+ promise_reactions, generator, fulfill_handler, reject_handler);
+ StoreObjectField(promise, JSPromise::kReactionsOrResultOffset, reaction);
+ PromiseSetHasHandler(promise);
+
+ // Perform ! Call(promiseCapability.[[Resolve]], undefined, « value »).
+ CallBuiltin(Builtins::kResolvePromise, native_context, promise, value);
+
+ // When debugging, we need to link from the {generator} to the
+ // {outer_promise} of the async function/generator.
+ Label done(this);
+ GotoIfNot(IsDebugActive(), &done);
+ CallRuntime(Runtime::kSetProperty, native_context, generator,
+ LoadRoot(Heap::kgenerator_outer_promise_symbolRootIndex),
+ outer_promise, SmiConstant(LanguageMode::kStrict));
+ GotoIf(IsFalse(is_predicted_as_caught), &done);
+ GotoIf(TaggedIsSmi(value), &done);
+ GotoIfNot(IsJSPromise(value), &done);
+ PromiseSetHandledHint(value);
+ Goto(&done);
+ BIND(&done);
+}
+
+void AsyncBuiltinsAssembler::Await(Node* context, Node* generator, Node* value,
+ Node* outer_promise,
+ Builtins::Name fulfill_builtin,
+ Builtins::Name reject_builtin,
+ bool is_predicted_as_caught) {
+ return Await(context, generator, value, outer_promise, fulfill_builtin,
+ reject_builtin, BooleanConstant(is_predicted_as_caught));
+}
+
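The rewritten Await drops the closure contexts and freshly allocated JSFunctions: it allocates one promise, attaches the fulfill/reject builtins' code objects directly as a PromiseReaction, and resolves that promise with the awaited value. Functionally it is close to the following TypeScript sketch (illustrative only; the real code never materializes closures):

  function awaitValue<T>(generator: { next(v: T): void; throw(e: unknown): void },
                         value: T | PromiseLike<T>): void {
    Promise.resolve(value)                 // ResolvePromise(promise, value)
      .then(v => generator.next(v),        // kAsyncFunctionAwaitFulfill -> resume with kNext
            e => generator.throw(e));      // kAsyncFunctionAwaitReject  -> resume with kThrow
  }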
namespace {
// Describe fields of Context associated with the AsyncIterator unwrap closure.
class ValueUnwrapContext {
@@ -22,165 +74,6 @@ class ValueUnwrapContext {
} // namespace
-Node* AsyncBuiltinsAssembler::Await(
- Node* context, Node* generator, Node* value, Node* outer_promise,
- int context_length, const ContextInitializer& init_closure_context,
- Node* on_resolve_context_index, Node* on_reject_context_index,
- Node* is_predicted_as_caught) {
- DCHECK_GE(context_length, Context::MIN_CONTEXT_SLOTS);
-
- Node* const native_context = LoadNativeContext(context);
-
- static const int kWrappedPromiseOffset = FixedArray::SizeFor(context_length);
- static const int kThrowawayPromiseOffset =
- kWrappedPromiseOffset + JSPromise::kSizeWithEmbedderFields;
- static const int kResolveClosureOffset =
- kThrowawayPromiseOffset + JSPromise::kSizeWithEmbedderFields;
- static const int kRejectClosureOffset =
- kResolveClosureOffset + JSFunction::kSizeWithoutPrototype;
- static const int kTotalSize =
- kRejectClosureOffset + JSFunction::kSizeWithoutPrototype;
-
- Node* const base = AllocateInNewSpace(kTotalSize);
- Node* const closure_context = base;
- {
- // Initialize closure context
- InitializeFunctionContext(native_context, closure_context, context_length);
- init_closure_context(closure_context);
- }
-
- // Let promiseCapability be ! NewPromiseCapability(%Promise%).
- Node* const promise_fun =
- LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
- CSA_ASSERT(this, IsFunctionWithPrototypeSlotMap(LoadMap(promise_fun)));
- Node* const promise_map =
- LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset);
- // Assert that the JSPromise map has an instance size of
- // JSPromise::kSizeWithEmbedderFields.
- CSA_ASSERT(this, WordEqual(LoadMapInstanceSizeInWords(promise_map),
- IntPtrConstant(JSPromise::kSizeWithEmbedderFields /
- kPointerSize)));
- Node* const wrapped_value = InnerAllocate(base, kWrappedPromiseOffset);
- {
- // Initialize Promise
- StoreMapNoWriteBarrier(wrapped_value, promise_map);
- InitializeJSObjectFromMap(
- wrapped_value, promise_map,
- IntPtrConstant(JSPromise::kSizeWithEmbedderFields));
- PromiseInit(wrapped_value);
- }
-
- Node* const throwaway = InnerAllocate(base, kThrowawayPromiseOffset);
- {
- // Initialize throwawayPromise
- StoreMapNoWriteBarrier(throwaway, promise_map);
- InitializeJSObjectFromMap(
- throwaway, promise_map,
- IntPtrConstant(JSPromise::kSizeWithEmbedderFields));
- PromiseInit(throwaway);
- }
-
- Node* const on_resolve = InnerAllocate(base, kResolveClosureOffset);
- {
- // Initialize resolve handler
- InitializeNativeClosure(closure_context, native_context, on_resolve,
- on_resolve_context_index);
- }
-
- Node* const on_reject = InnerAllocate(base, kRejectClosureOffset);
- {
- // Initialize reject handler
- InitializeNativeClosure(closure_context, native_context, on_reject,
- on_reject_context_index);
- }
-
- {
- // Add PromiseHooks if needed
- Label next(this);
- GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &next);
- CallRuntime(Runtime::kPromiseHookInit, context, wrapped_value,
- outer_promise);
- CallRuntime(Runtime::kPromiseHookInit, context, throwaway, wrapped_value);
- Goto(&next);
- BIND(&next);
- }
-
- // Perform ! Call(promiseCapability.[[Resolve]], undefined, « promise »).
- CallBuiltin(Builtins::kResolveNativePromise, context, wrapped_value, value);
-
- // The Promise will be thrown away and not handled, but it shouldn't trigger
- // unhandled reject events as its work is done
- PromiseSetHasHandler(throwaway);
-
- Label do_perform_promise_then(this);
- GotoIfNot(IsDebugActive(), &do_perform_promise_then);
- {
- Label common(this);
- GotoIf(TaggedIsSmi(value), &common);
- GotoIfNot(HasInstanceType(value, JS_PROMISE_TYPE), &common);
- {
- // Mark the reject handler callback to be a forwarding edge, rather
- // than a meaningful catch handler
- Node* const key =
- HeapConstant(factory()->promise_forwarding_handler_symbol());
- CallRuntime(Runtime::kSetProperty, context, on_reject, key,
- TrueConstant(), SmiConstant(LanguageMode::kStrict));
-
- GotoIf(IsFalse(is_predicted_as_caught), &common);
- PromiseSetHandledHint(value);
- }
-
- Goto(&common);
- BIND(&common);
- // Mark the dependency to outer Promise in case the throwaway Promise is
- // found on the Promise stack
- CSA_SLOW_ASSERT(this, HasInstanceType(outer_promise, JS_PROMISE_TYPE));
-
- Node* const key = HeapConstant(factory()->promise_handled_by_symbol());
- CallRuntime(Runtime::kSetProperty, context, throwaway, key, outer_promise,
- SmiConstant(LanguageMode::kStrict));
- }
-
- Goto(&do_perform_promise_then);
- BIND(&do_perform_promise_then);
-
- CallBuiltin(Builtins::kPerformNativePromiseThen, context, wrapped_value,
- on_resolve, on_reject, throwaway);
-
- return wrapped_value;
-}
-
-void AsyncBuiltinsAssembler::InitializeNativeClosure(Node* context,
- Node* native_context,
- Node* function,
- Node* context_index) {
- Node* const function_map = LoadContextElement(
- native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
- // Ensure that we don't have to initialize prototype_or_initial_map field of
- // JSFunction.
- CSA_ASSERT(this, WordEqual(LoadMapInstanceSizeInWords(function_map),
- IntPtrConstant(JSFunction::kSizeWithoutPrototype /
- kPointerSize)));
- STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
- StoreMapNoWriteBarrier(function, function_map);
- StoreObjectFieldRoot(function, JSObject::kPropertiesOrHashOffset,
- Heap::kEmptyFixedArrayRootIndex);
- StoreObjectFieldRoot(function, JSObject::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
- StoreObjectFieldRoot(function, JSFunction::kFeedbackVectorOffset,
- Heap::kUndefinedCellRootIndex);
-
- Node* shared_info = LoadContextElement(native_context, context_index);
- CSA_ASSERT(this, IsSharedFunctionInfo(shared_info));
- StoreObjectFieldNoWriteBarrier(
- function, JSFunction::kSharedFunctionInfoOffset, shared_info);
- StoreObjectFieldNoWriteBarrier(function, JSFunction::kContextOffset, context);
-
- Node* const code =
- LoadObjectField(shared_info, SharedFunctionInfo::kCodeOffset);
- StoreObjectFieldNoWriteBarrier(function, JSFunction::kCodeOffset, code);
-}
-
Node* AsyncBuiltinsAssembler::CreateUnwrapClosure(Node* native_context,
Node* done) {
Node* const map = LoadContextElement(
diff --git a/deps/v8/src/builtins/builtins-async-gen.h b/deps/v8/src/builtins/builtins-async-gen.h
index 212b0b618b..70f68a498b 100644
--- a/deps/v8/src/builtins/builtins-async-gen.h
+++ b/deps/v8/src/builtins/builtins-async-gen.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_BUILTINS_BUILTINS_ASYNC_H_
-#define V8_BUILTINS_BUILTINS_ASYNC_H_
+#ifndef V8_BUILTINS_BUILTINS_ASYNC_GEN_H_
+#define V8_BUILTINS_BUILTINS_ASYNC_GEN_H_
#include "src/builtins/builtins-promise-gen.h"
@@ -16,51 +16,26 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler {
: PromiseBuiltinsAssembler(state) {}
protected:
- typedef std::function<void(Node*)> ContextInitializer;
-
- // Perform steps to resume the generator after `value` is resolved.
- // `on_reject_context_index` is an index into the Native Context, which should
- // point to a SharedFunctionInfo instance used to create the closure. The
- // value following the reject index should be a similar value for the resolve
- // closure. Returns the Promise-wrapped `value`.
- Node* Await(Node* context, Node* generator, Node* value, Node* outer_promise,
- int context_length,
- const ContextInitializer& init_closure_context,
- Node* on_resolve_context_index, Node* on_reject_context_index,
- Node* is_predicted_as_caught);
- Node* Await(Node* context, Node* generator, Node* value, Node* outer_promise,
- int context_length,
- const ContextInitializer& init_closure_context,
- int on_resolve_context_index, int on_reject_context_index,
- Node* is_predicted_as_caught) {
- return Await(context, generator, value, outer_promise, context_length,
- init_closure_context, IntPtrConstant(on_resolve_context_index),
- IntPtrConstant(on_reject_context_index),
- is_predicted_as_caught);
- }
- Node* Await(Node* context, Node* generator, Node* value, Node* outer_promise,
- int context_length,
- const ContextInitializer& init_closure_context,
- int on_resolve_context_index, int on_reject_context_index,
- bool is_predicted_as_caught) {
- return Await(context, generator, value, outer_promise, context_length,
- init_closure_context, on_resolve_context_index,
- on_reject_context_index,
- BooleanConstant(is_predicted_as_caught));
- }
+ void Await(Node* context, Node* generator, Node* value, Node* outer_promise,
+ Builtins::Name fulfill_builtin, Builtins::Name reject_builtin,
+ Node* is_predicted_as_caught);
+ void Await(Node* context, Node* generator, Node* value, Node* outer_promise,
+ Builtins::Name fulfill_builtin, Builtins::Name reject_builtin,
+ bool is_predicted_as_caught);
// Return a new built-in function object as defined in
// Async Iterator Value Unwrap Functions
Node* CreateUnwrapClosure(Node* const native_context, Node* const done);
private:
- void InitializeNativeClosure(Node* context, Node* native_context,
- Node* function, Node* context_index);
Node* AllocateAsyncIteratorValueUnwrapContext(Node* native_context,
Node* done);
+ Node* AllocateAwaitPromiseJobTask(Node* generator, Node* fulfill_handler,
+ Node* reject_handler, Node* promise,
+ Node* context);
};
} // namespace internal
} // namespace v8
-#endif // V8_BUILTINS_BUILTINS_ASYNC_H_
+#endif // V8_BUILTINS_BUILTINS_ASYNC_GEN_H_
diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc
index 70726a5f9d..b78747aaa9 100644
--- a/deps/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc
@@ -68,24 +68,24 @@ class AsyncGeneratorBuiltinsAssembler : public AsyncBuiltinsAssembler {
return IsGeneratorStateNotExecuting(LoadGeneratorState(generator));
}
- inline Node* LoadGeneratorAwaitedPromise(Node* const generator) {
- return LoadObjectField(generator,
- JSAsyncGeneratorObject::kAwaitedPromiseOffset);
+ inline Node* IsGeneratorAwaiting(Node* const generator) {
+ Node* is_generator_awaiting =
+ LoadObjectField(generator, JSAsyncGeneratorObject::kIsAwaitingOffset);
+ return SmiEqual(is_generator_awaiting, SmiConstant(1));
}
- inline Node* IsGeneratorNotSuspendedForAwait(Node* const generator) {
- return IsUndefined(LoadGeneratorAwaitedPromise(generator));
- }
-
- inline Node* IsGeneratorSuspendedForAwait(Node* const generator) {
- return HasInstanceType(LoadGeneratorAwaitedPromise(generator),
- JS_PROMISE_TYPE);
+ inline void SetGeneratorAwaiting(Node* const generator) {
+ CSA_ASSERT(this, Word32BinaryNot(IsGeneratorAwaiting(generator)));
+ StoreObjectFieldNoWriteBarrier(
+ generator, JSAsyncGeneratorObject::kIsAwaitingOffset, SmiConstant(1));
+ CSA_ASSERT(this, IsGeneratorAwaiting(generator));
}
- inline void ClearAwaitedPromise(Node* const generator) {
- StoreObjectFieldRoot(generator,
- JSAsyncGeneratorObject::kAwaitedPromiseOffset,
- Heap::kUndefinedValueRootIndex);
+ inline void SetGeneratorNotAwaiting(Node* const generator) {
+ CSA_ASSERT(this, IsGeneratorAwaiting(generator));
+ StoreObjectFieldNoWriteBarrier(
+ generator, JSAsyncGeneratorObject::kIsAwaitingOffset, SmiConstant(0));
+ CSA_ASSERT(this, Word32BinaryNot(IsGeneratorAwaiting(generator)));
}
inline void CloseGenerator(Node* const generator) {
@@ -140,8 +140,8 @@ class AsyncGeneratorBuiltinsAssembler : public AsyncBuiltinsAssembler {
// for AsyncGenerators.
template <typename Descriptor>
void AsyncGeneratorAwait(bool is_catchable);
- void AsyncGeneratorAwaitResumeClosure(
- Node* context, Node* value,
+ void AsyncGeneratorAwaitResume(
+ Node* context, Node* generator, Node* argument,
JSAsyncGeneratorObject::ResumeMode resume_mode);
};
@@ -193,7 +193,7 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorEnqueue(
MakeTypeError(MessageTemplate::kIncompatibleMethodReceiver, context,
StringConstant(method_name), generator);
- CallBuiltin(Builtins::kRejectNativePromise, context, promise, error,
+ CallBuiltin(Builtins::kRejectPromise, context, promise, error,
TrueConstant());
args->PopAndReturn(promise);
}
@@ -219,21 +219,12 @@ Node* AsyncGeneratorBuiltinsAssembler::AllocateAsyncGeneratorRequest(
return request;
}
-void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwaitResumeClosure(
- Node* context, Node* value,
+void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwaitResume(
+ Node* context, Node* generator, Node* argument,
JSAsyncGeneratorObject::ResumeMode resume_mode) {
- Node* const generator =
- LoadContextElement(context, AwaitContext::kGeneratorSlot);
CSA_SLOW_ASSERT(this, TaggedIsAsyncGenerator(generator));
-#if defined(DEBUG) && defined(ENABLE_SLOW_DCHECKS)
- Node* const awaited_promise = LoadGeneratorAwaitedPromise(generator);
- CSA_SLOW_ASSERT(this, HasInstanceType(awaited_promise, JS_PROMISE_TYPE));
- CSA_SLOW_ASSERT(this, Word32NotEqual(PromiseStatus(awaited_promise),
- Int32Constant(v8::Promise::kPending)));
-#endif
-
- ClearAwaitedPromise(generator);
+ SetGeneratorNotAwaiting(generator);
CSA_SLOW_ASSERT(this, IsGeneratorSuspended(generator));
@@ -242,40 +233,30 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwaitResumeClosure(
JSGeneratorObject::kResumeModeOffset,
SmiConstant(resume_mode));
- CallStub(CodeFactory::ResumeGenerator(isolate()), context, value, generator);
+ CallStub(CodeFactory::ResumeGenerator(isolate()), context, argument,
+ generator);
TailCallBuiltin(Builtins::kAsyncGeneratorResumeNext, context, generator);
}
template <typename Descriptor>
void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwait(bool is_catchable) {
- Node* generator = Parameter(Descriptor::kGenerator);
- Node* value = Parameter(Descriptor::kAwaited);
- Node* context = Parameter(Descriptor::kContext);
+ Node* const generator = Parameter(Descriptor::kGenerator);
+ Node* const value = Parameter(Descriptor::kValue);
+ Node* const context = Parameter(Descriptor::kContext);
CSA_SLOW_ASSERT(this, TaggedIsAsyncGenerator(generator));
Node* const request = LoadFirstAsyncGeneratorRequestFromQueue(generator);
CSA_ASSERT(this, IsNotUndefined(request));
- ContextInitializer init_closure_context = [&](Node* context) {
- StoreContextElementNoWriteBarrier(context, AwaitContext::kGeneratorSlot,
- generator);
- };
-
Node* outer_promise =
LoadObjectField(request, AsyncGeneratorRequest::kPromiseOffset);
- const int resolve_index = Context::ASYNC_GENERATOR_AWAIT_RESOLVE_SHARED_FUN;
- const int reject_index = Context::ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN;
-
- Node* promise =
- Await(context, generator, value, outer_promise, AwaitContext::kLength,
- init_closure_context, resolve_index, reject_index, is_catchable);
-
- CSA_SLOW_ASSERT(this, IsGeneratorNotSuspendedForAwait(generator));
- StoreObjectField(generator, JSAsyncGeneratorObject::kAwaitedPromiseOffset,
- promise);
+ SetGeneratorAwaiting(generator);
+ Await(context, generator, value, outer_promise,
+ Builtins::kAsyncGeneratorAwaitFulfill,
+ Builtins::kAsyncGeneratorAwaitReject, is_catchable);
Return(UndefinedConstant());
}
@@ -386,18 +367,20 @@ TF_BUILTIN(AsyncGeneratorPrototypeThrow, AsyncGeneratorBuiltinsAssembler) {
"[AsyncGenerator].prototype.throw");
}
-TF_BUILTIN(AsyncGeneratorAwaitResolveClosure, AsyncGeneratorBuiltinsAssembler) {
- Node* value = Parameter(Descriptor::kValue);
- Node* context = Parameter(Descriptor::kContext);
- AsyncGeneratorAwaitResumeClosure(context, value,
- JSAsyncGeneratorObject::kNext);
+TF_BUILTIN(AsyncGeneratorAwaitFulfill, AsyncGeneratorBuiltinsAssembler) {
+ Node* const generator = Parameter(Descriptor::kGenerator);
+ Node* const argument = Parameter(Descriptor::kArgument);
+ Node* const context = Parameter(Descriptor::kContext);
+ AsyncGeneratorAwaitResume(context, generator, argument,
+ JSAsyncGeneratorObject::kNext);
}
-TF_BUILTIN(AsyncGeneratorAwaitRejectClosure, AsyncGeneratorBuiltinsAssembler) {
- Node* value = Parameter(Descriptor::kValue);
- Node* context = Parameter(Descriptor::kContext);
- AsyncGeneratorAwaitResumeClosure(context, value,
- JSAsyncGeneratorObject::kThrow);
+TF_BUILTIN(AsyncGeneratorAwaitReject, AsyncGeneratorBuiltinsAssembler) {
+ Node* const generator = Parameter(Descriptor::kGenerator);
+ Node* const argument = Parameter(Descriptor::kArgument);
+ Node* const context = Parameter(Descriptor::kContext);
+ AsyncGeneratorAwaitResume(context, generator, argument,
+ JSAsyncGeneratorObject::kThrow);
}
TF_BUILTIN(AsyncGeneratorAwaitUncaught, AsyncGeneratorBuiltinsAssembler) {
@@ -435,7 +418,7 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) {
CSA_ASSERT(this, IsGeneratorNotExecuting(generator));
// Stop resuming if suspended for Await.
- ReturnIf(IsGeneratorSuspendedForAwait(generator), UndefinedConstant());
+ ReturnIf(IsGeneratorAwaiting(generator), UndefinedConstant());
// Stop resuming if request queue is empty.
ReturnIf(IsUndefined(var_next.value()), UndefinedConstant());
@@ -452,10 +435,9 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) {
&settle_promise);
CloseGenerator(generator);
var_state.Bind(SmiConstant(JSGeneratorObject::kGeneratorClosed));
-
Goto(&settle_promise);
- BIND(&settle_promise);
+ BIND(&settle_promise);
Node* next_value = LoadValueFromAsyncGeneratorRequest(next);
Branch(SmiEqual(resume_type, SmiConstant(JSGeneratorObject::kReturn)),
&if_return, &if_throw);
@@ -511,7 +493,7 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) {
Node* const context = Parameter(Descriptor::kContext);
CSA_SLOW_ASSERT(this, TaggedIsAsyncGenerator(generator));
- CSA_ASSERT(this, IsGeneratorNotSuspendedForAwait(generator));
+ CSA_ASSERT(this, Word32BinaryNot(IsGeneratorAwaiting(generator)));
// If this assertion fails, the `value` component was not Awaited as it should
// have been, per https://github.com/tc39/proposal-async-iteration/pull/102/.
@@ -537,7 +519,7 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) {
}
// Perform Call(promiseCapability.[[Resolve]], undefined, «iteratorResult»).
- CallBuiltin(Builtins::kResolveNativePromise, context, promise, iter_result);
+ CallBuiltin(Builtins::kResolvePromise, context, promise, iter_result);
// Per spec, AsyncGeneratorResolve() returns undefined. However, for the
// benefit of %TraceExit(), return the Promise.
@@ -553,7 +535,7 @@ TF_BUILTIN(AsyncGeneratorReject, AsyncGeneratorBuiltinsAssembler) {
Node* const next = TakeFirstAsyncGeneratorRequestFromQueue(generator);
Node* const promise = LoadPromiseFromAsyncGeneratorRequest(next);
- Return(CallBuiltin(Builtins::kRejectNativePromise, context, promise, value,
+ Return(CallBuiltin(Builtins::kRejectPromise, context, promise, value,
TrueConstant()));
}
@@ -566,34 +548,23 @@ TF_BUILTIN(AsyncGeneratorYield, AsyncGeneratorBuiltinsAssembler) {
Node* const request = LoadFirstAsyncGeneratorRequestFromQueue(generator);
Node* const outer_promise = LoadPromiseFromAsyncGeneratorRequest(request);
- ContextInitializer init_closure_context = [&](Node* context) {
- StoreContextElementNoWriteBarrier(context, AwaitContext::kGeneratorSlot,
- generator);
- };
-
- const int on_resolve = Context::ASYNC_GENERATOR_YIELD_RESOLVE_SHARED_FUN;
- const int on_reject = Context::ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN;
-
- Node* const promise =
- Await(context, generator, value, outer_promise, AwaitContext::kLength,
- init_closure_context, on_resolve, on_reject, is_caught);
- StoreObjectField(generator, JSAsyncGeneratorObject::kAwaitedPromiseOffset,
- promise);
+ SetGeneratorAwaiting(generator);
+ Await(context, generator, value, outer_promise,
+ Builtins::kAsyncGeneratorYieldFulfill,
+ Builtins::kAsyncGeneratorAwaitReject, is_caught);
Return(UndefinedConstant());
}
-TF_BUILTIN(AsyncGeneratorYieldResolveClosure, AsyncGeneratorBuiltinsAssembler) {
+TF_BUILTIN(AsyncGeneratorYieldFulfill, AsyncGeneratorBuiltinsAssembler) {
Node* const context = Parameter(Descriptor::kContext);
- Node* const value = Parameter(Descriptor::kValue);
- Node* const generator =
- LoadContextElement(context, AwaitContext::kGeneratorSlot);
+ Node* const generator = Parameter(Descriptor::kGenerator);
+ Node* const argument = Parameter(Descriptor::kArgument);
- CSA_SLOW_ASSERT(this, IsGeneratorSuspendedForAwait(generator));
- ClearAwaitedPromise(generator);
+ SetGeneratorNotAwaiting(generator);
// Per proposal-async-iteration/#sec-asyncgeneratoryield step 9
// Return ! AsyncGeneratorResolve(_F_.[[Generator]], _value_, *false*).
- CallBuiltin(Builtins::kAsyncGeneratorResolve, context, generator, value,
+ CallBuiltin(Builtins::kAsyncGeneratorResolve, context, generator, argument,
FalseConstant());
TailCallBuiltin(Builtins::kAsyncGeneratorResumeNext, context, generator);
@@ -619,42 +590,33 @@ TF_BUILTIN(AsyncGeneratorReturn, AsyncGeneratorBuiltinsAssembler) {
Node* const generator = Parameter(Descriptor::kGenerator);
Node* const value = Parameter(Descriptor::kValue);
Node* const is_caught = Parameter(Descriptor::kIsCaught);
+ Node* const context = Parameter(Descriptor::kContext);
Node* const req = LoadFirstAsyncGeneratorRequestFromQueue(generator);
+ Node* const outer_promise = LoadPromiseFromAsyncGeneratorRequest(req);
CSA_ASSERT(this, IsNotUndefined(req));
- Label perform_await(this);
- VARIABLE(var_on_resolve, MachineType::PointerRepresentation(),
- IntPtrConstant(
- Context::ASYNC_GENERATOR_RETURN_CLOSED_RESOLVE_SHARED_FUN));
- VARIABLE(
- var_on_reject, MachineType::PointerRepresentation(),
- IntPtrConstant(Context::ASYNC_GENERATOR_RETURN_CLOSED_REJECT_SHARED_FUN));
-
+ Label if_closed(this, Label::kDeferred), if_not_closed(this), done(this);
Node* const state = LoadGeneratorState(generator);
- GotoIf(IsGeneratorStateClosed(state), &perform_await);
- var_on_resolve.Bind(
- IntPtrConstant(Context::ASYNC_GENERATOR_RETURN_RESOLVE_SHARED_FUN));
- var_on_reject.Bind(
- IntPtrConstant(Context::ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN));
- Goto(&perform_await);
+ SetGeneratorAwaiting(generator);
+ Branch(IsGeneratorStateClosed(state), &if_closed, &if_not_closed);
- BIND(&perform_await);
+ BIND(&if_closed);
+ {
+ Await(context, generator, value, outer_promise,
+ Builtins::kAsyncGeneratorReturnClosedFulfill,
+ Builtins::kAsyncGeneratorReturnClosedReject, is_caught);
+ Goto(&done);
+ }
- ContextInitializer init_closure_context = [&](Node* context) {
- StoreContextElementNoWriteBarrier(context, AwaitContext::kGeneratorSlot,
- generator);
- };
+ BIND(&if_not_closed);
+ {
+ Await(context, generator, value, outer_promise,
+ Builtins::kAsyncGeneratorReturnFulfill,
+ Builtins::kAsyncGeneratorAwaitReject, is_caught);
+ Goto(&done);
+ }
- Node* const context = Parameter(Descriptor::kContext);
- Node* const outer_promise = LoadPromiseFromAsyncGeneratorRequest(req);
- Node* const promise =
- Await(context, generator, value, outer_promise, AwaitContext::kLength,
- init_closure_context, var_on_resolve.value(), var_on_reject.value(),
- is_caught);
-
- CSA_SLOW_ASSERT(this, IsGeneratorNotSuspendedForAwait(generator));
- StoreObjectField(generator, JSAsyncGeneratorObject::kAwaitedPromiseOffset,
- promise);
+ BIND(&done);
Return(UndefinedConstant());
}
@@ -662,49 +624,44 @@ TF_BUILTIN(AsyncGeneratorReturn, AsyncGeneratorBuiltinsAssembler) {
// Resume the generator with "return" resume_mode, and finally perform
// AsyncGeneratorResumeNext. Per
// proposal-async-iteration/#sec-asyncgeneratoryield step 8.e
-TF_BUILTIN(AsyncGeneratorReturnResolveClosure,
- AsyncGeneratorBuiltinsAssembler) {
+TF_BUILTIN(AsyncGeneratorReturnFulfill, AsyncGeneratorBuiltinsAssembler) {
+ Node* const generator = Parameter(Descriptor::kGenerator);
+ Node* const argument = Parameter(Descriptor::kArgument);
Node* const context = Parameter(Descriptor::kContext);
- Node* const value = Parameter(Descriptor::kValue);
- AsyncGeneratorAwaitResumeClosure(context, value, JSGeneratorObject::kReturn);
+ AsyncGeneratorAwaitResume(context, generator, argument,
+ JSGeneratorObject::kReturn);
}
// On-resolve closure for Await in AsyncGeneratorReturn
// Perform AsyncGeneratorResolve({awaited_value}, true) and finally perform
// AsyncGeneratorResumeNext.
-TF_BUILTIN(AsyncGeneratorReturnClosedResolveClosure,
- AsyncGeneratorBuiltinsAssembler) {
+TF_BUILTIN(AsyncGeneratorReturnClosedFulfill, AsyncGeneratorBuiltinsAssembler) {
+ Node* const generator = Parameter(Descriptor::kGenerator);
+ Node* const argument = Parameter(Descriptor::kArgument);
Node* const context = Parameter(Descriptor::kContext);
- Node* const value = Parameter(Descriptor::kValue);
- Node* const generator =
- LoadContextElement(context, AwaitContext::kGeneratorSlot);
- CSA_SLOW_ASSERT(this, IsGeneratorSuspendedForAwait(generator));
- ClearAwaitedPromise(generator);
+ SetGeneratorNotAwaiting(generator);
// https://tc39.github.io/proposal-async-iteration/
// #async-generator-resume-next-return-processor-fulfilled step 2:
// Return ! AsyncGeneratorResolve(_F_.[[Generator]], _value_, *true*).
- CallBuiltin(Builtins::kAsyncGeneratorResolve, context, generator, value,
+ CallBuiltin(Builtins::kAsyncGeneratorResolve, context, generator, argument,
TrueConstant());
TailCallBuiltin(Builtins::kAsyncGeneratorResumeNext, context, generator);
}
-TF_BUILTIN(AsyncGeneratorReturnClosedRejectClosure,
- AsyncGeneratorBuiltinsAssembler) {
+TF_BUILTIN(AsyncGeneratorReturnClosedReject, AsyncGeneratorBuiltinsAssembler) {
+ Node* const generator = Parameter(Descriptor::kGenerator);
+ Node* const argument = Parameter(Descriptor::kArgument);
Node* const context = Parameter(Descriptor::kContext);
- Node* const value = Parameter(Descriptor::kValue);
- Node* const generator =
- LoadContextElement(context, AwaitContext::kGeneratorSlot);
- CSA_SLOW_ASSERT(this, IsGeneratorSuspendedForAwait(generator));
- ClearAwaitedPromise(generator);
+ SetGeneratorNotAwaiting(generator);
// https://tc39.github.io/proposal-async-iteration/
// #async-generator-resume-next-return-processor-rejected step 2:
// Return ! AsyncGeneratorReject(_F_.[[Generator]], _reason_).
- CallBuiltin(Builtins::kAsyncGeneratorReject, context, generator, value);
+ CallBuiltin(Builtins::kAsyncGeneratorReject, context, generator, argument);
TailCallBuiltin(Builtins::kAsyncGeneratorResumeNext, context, generator);
}
diff --git a/deps/v8/src/builtins/builtins-async-iterator-gen.cc b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
index f232b32700..58691bd00e 100644
--- a/deps/v8/src/builtins/builtins-async-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
@@ -28,13 +28,29 @@ class AsyncFromSyncBuiltinsAssembler : public AsyncBuiltinsAssembler {
typedef std::function<void(Node* const context, Node* const promise,
Label* if_exception)>
UndefinedMethodHandler;
+ typedef std::function<Node*(Node*)> SyncIteratorNodeGenerator;
void Generate_AsyncFromSyncIteratorMethod(
Node* const context, Node* const iterator, Node* const sent_value,
- Handle<Name> method_name, UndefinedMethodHandler&& if_method_undefined,
+ const SyncIteratorNodeGenerator& get_method,
+ const UndefinedMethodHandler& if_method_undefined,
const char* operation_name,
Label::Type reject_label_type = Label::kDeferred,
Node* const initial_exception_value = nullptr);
+ void Generate_AsyncFromSyncIteratorMethod(
+ Node* const context, Node* const iterator, Node* const sent_value,
+ Handle<String> name, const UndefinedMethodHandler& if_method_undefined,
+ const char* operation_name,
+ Label::Type reject_label_type = Label::kDeferred,
+ Node* const initial_exception_value = nullptr) {
+ auto get_method = [=](Node* const sync_iterator) {
+ return GetProperty(context, sync_iterator, name);
+ };
+ return Generate_AsyncFromSyncIteratorMethod(
+ context, iterator, sent_value, get_method, if_method_undefined,
+ operation_name, reject_label_type, initial_exception_value);
+ }
+
// Load "value" and "done" from an iterator result object. If an exception
// is thrown at any point, jumps to the `if_exception` label with the exception
// stored in `var_exception`.
@@ -79,7 +95,8 @@ void AsyncFromSyncBuiltinsAssembler::ThrowIfNotAsyncFromSyncIterator(
void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
Node* const context, Node* const iterator, Node* const sent_value,
- Handle<Name> method_name, UndefinedMethodHandler&& if_method_undefined,
+ const SyncIteratorNodeGenerator& get_method,
+ const UndefinedMethodHandler& if_method_undefined,
const char* operation_name, Label::Type reject_label_type,
Node* const initial_exception_value) {
Node* const native_context = LoadNativeContext(context);
@@ -96,7 +113,7 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
Node* const sync_iterator =
LoadObjectField(iterator, JSAsyncFromSyncIterator::kSyncIteratorOffset);
- Node* const method = GetProperty(context, sync_iterator, method_name);
+ Node* const method = get_method(sync_iterator);
if (if_method_undefined) {
Label if_isnotundefined(this);
@@ -119,7 +136,7 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
// Perform ! Call(valueWrapperCapability.[[Resolve]], undefined, «
// throwValue »).
- CallBuiltin(Builtins::kResolveNativePromise, context, wrapper, value);
+ CallBuiltin(Builtins::kResolvePromise, context, wrapper, value);
// Let onFulfilled be a new built-in function object as defined in
// Async Iterator Value Unwrap Functions.
@@ -128,13 +145,13 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
// Perform ! PerformPromiseThen(valueWrapperCapability.[[Promise]],
// onFulfilled, undefined, promiseCapability).
- Return(CallBuiltin(Builtins::kPerformNativePromiseThen, context, wrapper,
+ Return(CallBuiltin(Builtins::kPerformPromiseThen, context, wrapper,
on_fulfilled, UndefinedConstant(), promise));
BIND(&reject_promise);
{
Node* const exception = var_exception.value();
- CallBuiltin(Builtins::kRejectNativePromise, context, promise, exception,
+ CallBuiltin(Builtins::kRejectPromise, context, promise, exception,
TrueConstant());
Return(promise);
}
@@ -211,6 +228,7 @@ std::pair<Node*, Node*> AsyncFromSyncBuiltinsAssembler::LoadIteratorResult(
BIND(&done);
return std::make_pair(var_value.value(), var_done.value());
}
+
} // namespace
// https://tc39.github.io/proposal-async-iteration/
@@ -220,9 +238,12 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeNext, AsyncFromSyncBuiltinsAssembler) {
Node* const value = Parameter(Descriptor::kValue);
Node* const context = Parameter(Descriptor::kContext);
+ auto get_method = [=](Node* const unused) {
+ return LoadObjectField(iterator, JSAsyncFromSyncIterator::kNextOffset);
+ };
Generate_AsyncFromSyncIteratorMethod(
- context, iterator, value, factory()->next_string(),
- UndefinedMethodHandler(), "[Async-from-Sync Iterator].prototype.next");
+ context, iterator, value, get_method, UndefinedMethodHandler(),
+ "[Async-from-Sync Iterator].prototype.next");
}
// https://tc39.github.io/proposal-async-iteration/
@@ -243,7 +264,7 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturn,
// Perform ! Call(promiseCapability.[[Resolve]], undefined, « iterResult »).
// IfAbruptRejectPromise(nextDone, promiseCapability).
// Return promiseCapability.[[Promise]].
- PromiseFulfill(context, promise, iter_result, v8::Promise::kFulfilled);
+ CallBuiltin(Builtins::kResolvePromise, context, promise, iter_result);
Return(promise);
};
diff --git a/deps/v8/src/builtins/builtins-bigint.cc b/deps/v8/src/builtins/builtins-bigint.cc
index 6d9bb6e797..fdbd3937d4 100644
--- a/deps/v8/src/builtins/builtins-bigint.cc
+++ b/deps/v8/src/builtins/builtins-bigint.cc
@@ -36,33 +36,6 @@ BUILTIN(BigIntConstructor_ConstructStub) {
isolate->factory()->BigInt_string()));
}
-BUILTIN(BigIntParseInt) {
- HandleScope scope(isolate);
- Handle<Object> string = args.atOrUndefined(isolate, 1);
- Handle<Object> radix = args.atOrUndefined(isolate, 2);
-
- // Convert {string} to a String and flatten it.
- // Fast path: avoid back-and-forth conversion for Smi inputs.
- if (string->IsSmi() && radix->IsUndefined(isolate)) {
- RETURN_RESULT_OR_FAILURE(isolate, BigInt::FromNumber(isolate, string));
- }
- Handle<String> subject;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, subject,
- Object::ToString(isolate, string));
- subject = String::Flatten(subject);
-
- // Convert {radix} to Int32.
- if (!radix->IsNumber()) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, radix, Object::ToNumber(radix));
- }
- int radix32 = DoubleToInt32(radix->Number());
- if (radix32 != 0 && (radix32 < 2 || radix32 > 36)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewSyntaxError(MessageTemplate::kToRadixFormatRange));
- }
- RETURN_RESULT_OR_FAILURE(isolate, BigIntParseInt(isolate, subject, radix32));
-}
-
BUILTIN(BigIntAsUintN) {
HandleScope scope(isolate);
Handle<Object> bits_obj = args.atOrUndefined(isolate, 1);
@@ -97,14 +70,6 @@ BUILTIN(BigIntAsIntN) {
return *BigInt::AsIntN(bits->Number(), bigint);
}
-BUILTIN(BigIntPrototypeToLocaleString) {
- HandleScope scope(isolate);
-
- // TODO(jkummerow): Implement.
-
- UNIMPLEMENTED();
-}
-
namespace {
MaybeHandle<BigInt> ThisBigIntValue(Isolate* isolate, Handle<Object> value,
@@ -127,18 +92,14 @@ MaybeHandle<BigInt> ThisBigIntValue(Isolate* isolate, Handle<Object> value,
BigInt);
}
-} // namespace
-
-BUILTIN(BigIntPrototypeToString) {
- HandleScope scope(isolate);
+Object* BigIntToStringImpl(Handle<Object> receiver, Handle<Object> radix,
+ Isolate* isolate, const char* builtin_name) {
// 1. Let x be ? thisBigIntValue(this value).
Handle<BigInt> x;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, x,
- ThisBigIntValue(isolate, args.receiver(), "BigInt.prototype.toString"));
+ isolate, x, ThisBigIntValue(isolate, receiver, builtin_name));
// 2. If radix is not present, let radixNumber be 10.
// 3. Else if radix is undefined, let radixNumber be 10.
- Handle<Object> radix = args.atOrUndefined(isolate, 1);
int radix_number;
if (radix->IsUndefined(isolate)) {
radix_number = 10;
@@ -158,6 +119,22 @@ BUILTIN(BigIntPrototypeToString) {
RETURN_RESULT_OR_FAILURE(isolate, BigInt::ToString(x, radix_number));
}
+} // namespace
+
+BUILTIN(BigIntPrototypeToLocaleString) {
+ HandleScope scope(isolate);
+ Handle<Object> radix = isolate->factory()->undefined_value();
+ return BigIntToStringImpl(args.receiver(), radix, isolate,
+ "BigInt.prototype.toLocaleString");
+}
+
+BUILTIN(BigIntPrototypeToString) {
+ HandleScope scope(isolate);
+ Handle<Object> radix = args.atOrUndefined(isolate, 1);
+ return BigIntToStringImpl(args.receiver(), radix, isolate,
+ "BigInt.prototype.toString");
+}
+
BUILTIN(BigIntPrototypeValueOf) {
HandleScope scope(isolate);
RETURN_RESULT_OR_FAILURE(
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index d4a7153d74..7443202c98 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -168,7 +168,7 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
Node* elements_length = LoadFixedArrayBaseLength(elements);
GotoIfNot(WordEqual(length, elements_length), &if_runtime);
var_elements.Bind(elements);
- var_length.Bind(SmiToWord32(length));
+ var_length.Bind(SmiToInt32(length));
Goto(&if_done);
}
@@ -289,12 +289,8 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
&if_runtime);
// Check that the map of the initial array iterator hasn't changed.
- Node* native_context = LoadNativeContext(context);
- Node* arr_it_proto_map = LoadMap(CAST(LoadContextElement(
- native_context, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX)));
- Node* initial_map = LoadContextElement(
- native_context, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX);
- GotoIfNot(WordEqual(arr_it_proto_map, initial_map), &if_runtime);
+ TNode<Context> native_context = LoadNativeContext(context);
+ GotoIfNot(HasInitialArrayIteratorPrototypeMap(native_context), &if_runtime);
Node* kind = LoadMapElementsKind(spread_map);
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
index 392040c995..563703707c 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.cc
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -26,31 +26,32 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
virtual ~BaseCollectionsAssembler() {}
protected:
- enum Variant { kMap, kSet };
+ enum Variant { kMap, kSet, kWeakMap, kWeakSet };
// Adds an entry to a collection. For Maps, properly handles extracting the
// key and value from the entry (see LoadKeyValue()).
- TNode<Object> AddConstructorEntry(Variant variant, TNode<Context> context,
- TNode<Object> collection,
- TNode<Object> add_function,
- TNode<Object> key_value,
- Label* if_exception = nullptr,
- TVariable<Object>* var_exception = nullptr);
+ void AddConstructorEntry(Variant variant, TNode<Context> context,
+ TNode<Object> collection, TNode<Object> add_function,
+ TNode<Object> key_value,
+ Label* if_may_have_side_effects = nullptr,
+ Label* if_exception = nullptr,
+ TVariable<Object>* var_exception = nullptr);
// Adds constructor entries to a collection. Choosing a fast path when
// possible.
void AddConstructorEntries(Variant variant, TNode<Context> context,
TNode<Context> native_context,
TNode<Object> collection,
- TNode<Object> initial_entries,
- TNode<BoolT> is_fast_jsarray);
+ TNode<Object> initial_entries);
// Fast path for adding constructor entries. Assumes the entries are a fast
// JS array (see CodeStubAssembler::BranchIfFastJSArray()).
void AddConstructorEntriesFromFastJSArray(Variant variant,
TNode<Context> context,
+ TNode<Context> native_context,
TNode<Object> collection,
- TNode<JSArray> fast_jsarray);
+ TNode<JSArray> fast_jsarray,
+ Label* if_may_have_side_effects);
// Adds constructor entries to a collection using the iterator protocol.
void AddConstructorEntriesFromIterable(Variant variant,
@@ -61,8 +62,7 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
// Constructs a collection instance. Choosing a fast path when possible.
TNode<Object> AllocateJSCollection(TNode<Context> context,
- TNode<Context> native_context,
- int constructor_function_index,
+ TNode<JSFunction> constructor,
TNode<Object> new_target);
// Fast path for constructing a collection instance if the constructor
@@ -72,7 +72,7 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
// Fallback for constructing a collection instance if the constructor function
// has been modified.
TNode<Object> AllocateJSCollectionSlow(TNode<Context> context,
- TNode<HeapObject> constructor,
+ TNode<JSFunction> constructor,
TNode<Object> new_target);
// Allocates the backing store for a collection.
@@ -81,15 +81,26 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
// Main entry point for a collection constructor builtin.
void GenerateConstructor(Variant variant,
- const int constructor_function_index,
- Handle<String> constructor_function_name,
- int collection_tableoffset);
+ Handle<String> constructor_function_name);
// Retrieves the collection function that adds an entry. `set` for Maps and
// `add` for Sets.
TNode<Object> GetAddFunction(Variant variant, TNode<Context> context,
TNode<Object> collection);
+ // Retrieves the collection constructor function.
+ TNode<JSFunction> GetConstructor(Variant variant,
+ TNode<Context> native_context);
+
+ // Retrieves the initial collection function that adds an entry. Should only
+ // be called when it is certain that a collection prototype's map hasn't been
+ // changed.
+ TNode<JSFunction> GetInitialAddFunction(Variant variant,
+ TNode<Context> native_context);
+
+ // Retrieves the offset to access the backing table from the collection.
+ int GetTableOffset(Variant variant);
+
// Estimates the number of entries the collection will have after adding the
// entries passed in the constructor. AllocateTable() can use this to avoid
// the time of growing/rehashing when adding the constructor entries.
@@ -98,6 +109,11 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
void GotoIfNotJSReceiver(Node* const obj, Label* if_not_receiver);
+ // Determines whether the collection's prototype has been modified.
+ TNode<BoolT> HasInitialCollectionPrototype(Variant variant,
+ TNode<Context> native_context,
+ TNode<Object> collection);
+
// Loads an element from a fixed array. If the element is the hole, returns
// `undefined`.
TNode<Object> LoadAndNormalizeFixedArrayElement(TNode<Object> elements,
@@ -112,59 +128,85 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
// array. If the array lacks 2 elements, undefined is used.
void LoadKeyValue(TNode<Context> context, TNode<Object> maybe_array,
TVariable<Object>* key, TVariable<Object>* value,
+ Label* if_may_have_side_effects = nullptr,
Label* if_exception = nullptr,
TVariable<Object>* var_exception = nullptr);
};
-TNode<Object> BaseCollectionsAssembler::AddConstructorEntry(
+void BaseCollectionsAssembler::AddConstructorEntry(
Variant variant, TNode<Context> context, TNode<Object> collection,
- TNode<Object> add_function, TNode<Object> key_value, Label* if_exception,
+ TNode<Object> add_function, TNode<Object> key_value,
+ Label* if_may_have_side_effects, Label* if_exception,
TVariable<Object>* var_exception) {
CSA_ASSERT(this, Word32BinaryNot(IsTheHole(key_value)));
- if (variant == kMap) {
- Label exit(this), if_notobject(this, Label::kDeferred);
- GotoIfNotJSReceiver(key_value, &if_notobject);
-
+ if (variant == kMap || variant == kWeakMap) {
TVARIABLE(Object, key);
TVARIABLE(Object, value);
- LoadKeyValue(context, key_value, &key, &value, if_exception, var_exception);
- Node* key_n = key;
- Node* value_n = value;
- TNode<Object> add_call =
- UncheckedCast<Object>(CallJS(CodeFactory::Call(isolate()), context,
- add_function, collection, key_n, value_n));
- Goto(&exit);
-
- BIND(&if_notobject);
- {
- Node* ret = CallRuntime(
- Runtime::kThrowTypeError, context,
- SmiConstant(MessageTemplate::kIteratorValueNotAnObject), key_value);
- if (if_exception != nullptr) {
- DCHECK(var_exception != nullptr);
- GotoIfException(ret, if_exception, var_exception);
- }
- Unreachable();
- }
- BIND(&exit);
- return add_call;
-
- } else { // variant == kSet
- DCHECK(variant == kSet);
- return UncheckedCast<Object>(CallJS(CodeFactory::Call(isolate()), context,
- add_function, collection, key_value));
+ LoadKeyValue(context, key_value, &key, &value, if_may_have_side_effects,
+ if_exception, var_exception);
+ Node* key_n = key.value();
+ Node* value_n = value.value();
+ Node* ret = CallJS(CodeFactory::Call(isolate()), context, add_function,
+ collection, key_n, value_n);
+ GotoIfException(ret, if_exception, var_exception);
+ } else {
+ DCHECK(variant == kSet || variant == kWeakSet);
+ Node* ret = CallJS(CodeFactory::Call(isolate()), context, add_function,
+ collection, key_value);
+ GotoIfException(ret, if_exception, var_exception);
}
}
void BaseCollectionsAssembler::AddConstructorEntries(
Variant variant, TNode<Context> context, TNode<Context> native_context,
- TNode<Object> collection, TNode<Object> initial_entries,
- TNode<BoolT> is_fast_jsarray) {
- Label exit(this), slow_loop(this, Label::kDeferred);
- GotoIf(IsNullOrUndefined(initial_entries), &exit);
+ TNode<Object> collection, TNode<Object> initial_entries) {
+ TVARIABLE(BoolT, use_fast_loop,
+ IsFastJSArrayWithNoCustomIteration(initial_entries, context,
+ native_context));
+ TNode<IntPtrT> at_least_space_for =
+ EstimatedInitialSize(initial_entries, use_fast_loop.value());
+ Label allocate_table(this, &use_fast_loop), exit(this), fast_loop(this),
+ slow_loop(this, Label::kDeferred);
+ Goto(&allocate_table);
+ BIND(&allocate_table);
+ {
+ TNode<Object> table = AllocateTable(variant, context, at_least_space_for);
+ StoreObjectField(collection, GetTableOffset(variant), table);
+ GotoIf(IsNullOrUndefined(initial_entries), &exit);
+ GotoIfNot(
+ HasInitialCollectionPrototype(variant, native_context, collection),
+ &slow_loop);
+ Branch(use_fast_loop.value(), &fast_loop, &slow_loop);
+ }
+ BIND(&fast_loop);
+ {
+ TNode<JSArray> initial_entries_jsarray =
+ UncheckedCast<JSArray>(initial_entries);
+#if DEBUG
+ CSA_ASSERT(this, IsFastJSArrayWithNoCustomIteration(
+ initial_entries_jsarray, context, native_context));
+ TNode<Map> original_initial_entries_map = LoadMap(initial_entries_jsarray);
+#endif
+
+ Label if_may_have_side_effects(this, Label::kDeferred);
+ AddConstructorEntriesFromFastJSArray(variant, context, native_context,
+ collection, initial_entries_jsarray,
+ &if_may_have_side_effects);
+ Goto(&exit);
- // TODO(mvstanton): Re-enable the fast path when a fix is found for
- // crbug.com/798026.
+ if (variant == kMap || variant == kWeakMap) {
+ BIND(&if_may_have_side_effects);
+#if DEBUG
+ CSA_ASSERT(this, HasInitialCollectionPrototype(variant, native_context,
+ collection));
+ CSA_ASSERT(this, WordEqual(original_initial_entries_map,
+ LoadMap(initial_entries_jsarray)));
+#endif
+ use_fast_loop = Int32FalseConstant();
+ Goto(&allocate_table);
+ }
+ }
+ BIND(&slow_loop);
{
AddConstructorEntriesFromIterable(variant, context, native_context,
collection, initial_entries);
@@ -174,17 +216,26 @@ void BaseCollectionsAssembler::AddConstructorEntries(
}
void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
- Variant variant, TNode<Context> context, TNode<Object> collection,
- TNode<JSArray> fast_jsarray) {
+ Variant variant, TNode<Context> context, TNode<Context> native_context,
+ TNode<Object> collection, TNode<JSArray> fast_jsarray,
+ Label* if_may_have_side_effects) {
TNode<FixedArrayBase> elements = LoadElements(fast_jsarray);
TNode<Int32T> elements_kind = LoadMapElementsKind(LoadMap(fast_jsarray));
+ TNode<JSFunction> add_func = GetInitialAddFunction(variant, native_context);
+ CSA_ASSERT(
+ this,
+ WordEqual(GetAddFunction(variant, native_context, collection), add_func));
+ CSA_ASSERT(this, IsFastJSArrayWithNoCustomIteration(fast_jsarray, context,
+ native_context));
TNode<IntPtrT> length = SmiUntag(LoadFastJSArrayLength(fast_jsarray));
- TNode<Object> add_func = GetAddFunction(variant, context, collection);
-
- CSA_ASSERT(this, IsFastJSArray(fast_jsarray, context));
- CSA_ASSERT(this, IsFastElementsKind(elements_kind));
CSA_ASSERT(this, IntPtrGreaterThanOrEqual(length, IntPtrConstant(0)));
+ CSA_ASSERT(
+ this, HasInitialCollectionPrototype(variant, native_context, collection));
+#if DEBUG
+ TNode<Map> original_collection_map = LoadMap(CAST(collection));
+ TNode<Map> original_fast_js_array_map = LoadMap(fast_jsarray);
+#endif
Label exit(this), if_doubles(this), if_smiorobjects(this);
Branch(IsFastSmiOrTaggedElementsKind(elements_kind), &if_smiorobjects,
&if_doubles);
@@ -193,8 +244,14 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
auto set_entry = [&](Node* index) {
TNode<Object> element = LoadAndNormalizeFixedArrayElement(
elements, UncheckedCast<IntPtrT>(index));
- AddConstructorEntry(variant, context, collection, add_func, element);
+ AddConstructorEntry(variant, context, collection, add_func, element,
+ if_may_have_side_effects);
};
+
+ // Instead of using the slower iteration protocol to iterate over the
+ // elements, a fast loop is used. This assumes that adding an element
+ // to the collection does not call user code that could mutate the elements
+ // or collection.
BuildFastLoop(IntPtrConstant(0), length, set_entry, 1,
ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
Goto(&exit);
@@ -203,7 +260,7 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
{
// A Map constructor requires entries to be arrays (ex. [key, value]),
// so a FixedDoubleArray can never succeed.
- if (variant == kMap) {
+ if (variant == kMap || variant == kWeakMap) {
TNode<Float64T> element =
UncheckedCast<Float64T>(LoadFixedDoubleArrayElement(
elements, IntPtrConstant(0), MachineType::Float64(), 0,
@@ -211,10 +268,11 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
ThrowTypeError(context, MessageTemplate::kIteratorValueNotAnObject,
AllocateHeapNumberWithValue(element));
} else {
+ DCHECK(variant == kSet || variant == kWeakSet);
auto set_entry = [&](Node* index) {
TNode<Object> entry = LoadAndNormalizeFixedDoubleArrayElement(
elements, UncheckedCast<IntPtrT>(index));
- AddConstructorEntry(kSet, context, collection, add_func, entry);
+ AddConstructorEntry(variant, context, collection, add_func, entry);
};
BuildFastLoop(IntPtrConstant(0), length, set_entry, 1,
ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
@@ -222,6 +280,12 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
}
}
BIND(&exit);
+#if DEBUG
+ CSA_ASSERT(this,
+ WordEqual(original_collection_map, LoadMap(CAST(collection))));
+ CSA_ASSERT(this,
+ WordEqual(original_fast_js_array_map, LoadMap(fast_jsarray)));
+#endif
}
void BaseCollectionsAssembler::AddConstructorEntriesFromIterable(
@@ -247,10 +311,8 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromIterable(
context, iterator, &exit, fast_iterator_result_map));
TNode<Object> next_value = CAST(iterator_assembler.IteratorValue(
context, next, fast_iterator_result_map));
- TNode<Object> add_result =
- AddConstructorEntry(variant, context, collection, add_func, next_value,
- &if_exception, &var_exception);
- GotoIfException(add_result, &if_exception, &var_exception);
+ AddConstructorEntry(variant, context, collection, add_func, next_value,
+ nullptr, &if_exception, &var_exception);
Goto(&loop);
}
BIND(&if_exception);
@@ -262,10 +324,8 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromIterable(
}
TNode<Object> BaseCollectionsAssembler::AllocateJSCollection(
- TNode<Context> context, TNode<Context> native_context,
- int constructor_function_index, TNode<Object> new_target) {
- TNode<HeapObject> constructor =
- CAST(LoadContextElement(native_context, constructor_function_index));
+ TNode<Context> context, TNode<JSFunction> constructor,
+ TNode<Object> new_target) {
TNode<BoolT> is_target_unmodified = WordEqual(constructor, new_target);
return Select<Object>(is_target_unmodified,
@@ -286,7 +346,7 @@ TNode<Object> BaseCollectionsAssembler::AllocateJSCollectionFast(
}
TNode<Object> BaseCollectionsAssembler::AllocateJSCollectionSlow(
- TNode<Context> context, TNode<HeapObject> constructor,
+ TNode<Context> context, TNode<JSFunction> constructor,
TNode<Object> new_target) {
ConstructorBuiltinsAssembler constructor_assembler(this->state());
return CAST(constructor_assembler.EmitFastNewObject(context, constructor,
@@ -294,8 +354,7 @@ TNode<Object> BaseCollectionsAssembler::AllocateJSCollectionSlow(
}
void BaseCollectionsAssembler::GenerateConstructor(
- Variant variant, const int constructor_function_index,
- Handle<String> constructor_function_name, int collection_tableoffset) {
+ Variant variant, Handle<String> constructor_function_name) {
const int kIterableArg = 0;
CodeStubArguments args(
this, ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount)));
@@ -306,17 +365,11 @@ void BaseCollectionsAssembler::GenerateConstructor(
Label if_undefined(this, Label::kDeferred);
GotoIf(IsUndefined(new_target), &if_undefined);
- TNode<BoolT> is_fast_jsarray = IsFastJSArray(iterable, context);
- TNode<IntPtrT> at_least_space_for =
- EstimatedInitialSize(iterable, is_fast_jsarray);
TNode<Context> native_context = LoadNativeContext(context);
TNode<Object> collection = AllocateJSCollection(
- context, native_context, constructor_function_index, new_target);
- TNode<Object> table = AllocateTable(variant, context, at_least_space_for);
+ context, GetConstructor(variant, native_context), new_target);
- StoreObjectField(collection, collection_tableoffset, table);
- AddConstructorEntries(variant, context, native_context, collection, iterable,
- is_fast_jsarray);
+ AddConstructorEntries(variant, context, native_context, collection, iterable);
Return(collection);
BIND(&if_undefined);
@@ -326,14 +379,10 @@ void BaseCollectionsAssembler::GenerateConstructor(
TNode<Object> BaseCollectionsAssembler::GetAddFunction(
Variant variant, TNode<Context> context, TNode<Object> collection) {
- // TODO(pwong): Consider calling the builtin directly when the prototype is
- // unmodified. This will require tracking WeakMap/WeakSet prototypes on the
- // native context.
- Handle<String> add_func_name = variant == kMap
+ Handle<String> add_func_name = (variant == kMap || variant == kWeakMap)
? isolate()->factory()->set_string()
: isolate()->factory()->add_string();
- TNode<Object> add_func =
- CAST(GetProperty(context, collection, add_func_name));
+ TNode<Object> add_func = GetProperty(context, collection, add_func_name);
Label exit(this), if_notcallable(this, Label::kDeferred);
GotoIf(TaggedIsSmi(add_func), &if_notcallable);
@@ -348,6 +397,60 @@ TNode<Object> BaseCollectionsAssembler::GetAddFunction(
return add_func;
}
+TNode<JSFunction> BaseCollectionsAssembler::GetConstructor(
+ Variant variant, TNode<Context> native_context) {
+ int index;
+ switch (variant) {
+ case kMap:
+ index = Context::JS_MAP_FUN_INDEX;
+ break;
+ case kSet:
+ index = Context::JS_SET_FUN_INDEX;
+ break;
+ case kWeakMap:
+ index = Context::JS_WEAK_MAP_FUN_INDEX;
+ break;
+ case kWeakSet:
+ index = Context::JS_WEAK_SET_FUN_INDEX;
+ break;
+ }
+ return CAST(LoadContextElement(native_context, index));
+}
+
+TNode<JSFunction> BaseCollectionsAssembler::GetInitialAddFunction(
+ Variant variant, TNode<Context> native_context) {
+ int index;
+ switch (variant) {
+ case kMap:
+ index = Context::MAP_SET_INDEX;
+ break;
+ case kSet:
+ index = Context::SET_ADD_INDEX;
+ break;
+ case kWeakMap:
+ index = Context::WEAKMAP_SET_INDEX;
+ break;
+ case kWeakSet:
+ index = Context::WEAKSET_ADD_INDEX;
+ break;
+ }
+ return CAST(LoadContextElement(native_context, index));
+}
+
+int BaseCollectionsAssembler::GetTableOffset(Variant variant) {
+ switch (variant) {
+ case kMap:
+ return JSMap::kTableOffset;
+ case kSet:
+ return JSSet::kTableOffset;
+ case kWeakMap:
+ return JSWeakMap::kTableOffset;
+ case kWeakSet:
+ return JSWeakSet::kTableOffset;
+ }
+ UNREACHABLE();
+}
+
TNode<IntPtrT> BaseCollectionsAssembler::EstimatedInitialSize(
TNode<Object> initial_entries, TNode<BoolT> is_fast_jsarray) {
return Select<IntPtrT>(
@@ -362,6 +465,31 @@ void BaseCollectionsAssembler::GotoIfNotJSReceiver(Node* const obj,
GotoIfNot(IsJSReceiver(obj), if_not_receiver);
}
+TNode<BoolT> BaseCollectionsAssembler::HasInitialCollectionPrototype(
+ Variant variant, TNode<Context> native_context, TNode<Object> collection) {
+ int initial_prototype_index;
+ switch (variant) {
+ case kMap:
+ initial_prototype_index = Context::INITIAL_MAP_PROTOTYPE_MAP_INDEX;
+ break;
+ case kSet:
+ initial_prototype_index = Context::INITIAL_SET_PROTOTYPE_MAP_INDEX;
+ break;
+ case kWeakMap:
+ initial_prototype_index = Context::INITIAL_WEAKMAP_PROTOTYPE_MAP_INDEX;
+ break;
+ case kWeakSet:
+ initial_prototype_index = Context::INITIAL_WEAKSET_PROTOTYPE_MAP_INDEX;
+ break;
+ }
+ TNode<Map> initial_prototype_map =
+ CAST(LoadContextElement(native_context, initial_prototype_index));
+ TNode<Map> collection_proto_map =
+ LoadMap(CAST(LoadMapPrototype(LoadMap(CAST(collection)))));
+
+ return WordEqual(collection_proto_map, initial_prototype_map);
+}
+
TNode<Object> BaseCollectionsAssembler::LoadAndNormalizeFixedArrayElement(
TNode<Object> elements, TNode<IntPtrT> index) {
TNode<Object> element = CAST(LoadFixedArrayElement(elements, index));
@@ -386,15 +514,13 @@ TNode<Object> BaseCollectionsAssembler::LoadAndNormalizeFixedDoubleArrayElement(
Goto(&next);
}
BIND(&next);
- return entry;
+ return entry.value();
}
-void BaseCollectionsAssembler::LoadKeyValue(TNode<Context> context,
- TNode<Object> maybe_array,
- TVariable<Object>* key,
- TVariable<Object>* value,
- Label* if_exception,
- TVariable<Object>* var_exception) {
+void BaseCollectionsAssembler::LoadKeyValue(
+ TNode<Context> context, TNode<Object> maybe_array, TVariable<Object>* key,
+ TVariable<Object>* value, Label* if_may_have_side_effects,
+ Label* if_exception, TVariable<Object>* var_exception) {
CSA_ASSERT(this, Word32BinaryNot(IsTheHole(maybe_array)));
Label exit(this), if_fast(this), if_slow(this, Label::kDeferred);
@@ -461,20 +587,31 @@ void BaseCollectionsAssembler::LoadKeyValue(TNode<Context> context,
}
BIND(&if_slow);
{
- *key = UncheckedCast<Object>(
- GetProperty(context, maybe_array, isolate()->factory()->zero_string()));
- if (if_exception != nullptr) {
- DCHECK(var_exception != nullptr);
- GotoIfException(*key, if_exception, var_exception);
- }
+ Label if_notobject(this, Label::kDeferred);
+ GotoIfNotJSReceiver(maybe_array, &if_notobject);
+ if (if_may_have_side_effects != nullptr) {
+ // If the element is not a fast array, we cannot guarantee accessing the
+ // key and value won't execute user code that will break fast path
+ // assumptions.
+ Goto(if_may_have_side_effects);
+ } else {
+ *key = UncheckedCast<Object>(GetProperty(
+ context, maybe_array, isolate()->factory()->zero_string()));
+ GotoIfException(key->value(), if_exception, var_exception);
- *value = UncheckedCast<Object>(
- GetProperty(context, maybe_array, isolate()->factory()->one_string()));
- if (if_exception != nullptr) {
- DCHECK(var_exception != nullptr);
- GotoIfException(*value, if_exception, var_exception);
+ *value = UncheckedCast<Object>(GetProperty(
+ context, maybe_array, isolate()->factory()->one_string()));
+ GotoIfException(value->value(), if_exception, var_exception);
+ Goto(&exit);
+ }
+ BIND(&if_notobject);
+ {
+ Node* ret = CallRuntime(
+ Runtime::kThrowTypeError, context,
+ SmiConstant(MessageTemplate::kIteratorValueNotAnObject), maybe_array);
+ GotoIfException(ret, if_exception, var_exception);
+ Unreachable();
}
- Goto(&exit);
}
BIND(&exit);
}
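The new if_may_have_side_effects label lets a fast-path caller of LoadKeyValue bail out before touching an entry that is not a fast JSArray, since reading properties 0 and 1 of an arbitrary receiver (getters, proxies) can run user code that invalidates fast-path assumptions. A hedged caller sketch with hypothetical names (entry, if_exception, var_exception are assumed to exist; not part of this patch):

  // Bail out to a generic path before any user-observable property access
  // can happen on a non-fast-array entry.
  TVARIABLE(Object, var_key);
  TVARIABLE(Object, var_value);
  Label if_may_have_side_effects(this, Label::kDeferred);
  LoadKeyValue(context, entry, &var_key, &var_value,
               &if_may_have_side_effects, &if_exception, &var_exception);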
@@ -672,18 +809,17 @@ Node* CollectionsBuiltinsAssembler::AllocateJSCollectionIterator(
TNode<Object> CollectionsBuiltinsAssembler::AllocateTable(
Variant variant, TNode<Context> context,
TNode<IntPtrT> at_least_space_for) {
- return CAST(variant == kMap ? AllocateOrderedHashTable<OrderedHashMap>()
- : AllocateOrderedHashTable<OrderedHashSet>());
+ return CAST((variant == kMap || variant == kWeakMap)
+ ? AllocateOrderedHashTable<OrderedHashMap>()
+ : AllocateOrderedHashTable<OrderedHashSet>());
}
TF_BUILTIN(MapConstructor, CollectionsBuiltinsAssembler) {
- GenerateConstructor(kMap, Context::JS_MAP_FUN_INDEX,
- isolate()->factory()->Map_string(), JSMap::kTableOffset);
+ GenerateConstructor(kMap, isolate()->factory()->Map_string());
}
TF_BUILTIN(SetConstructor, CollectionsBuiltinsAssembler) {
- GenerateConstructor(kSet, Context::JS_SET_FUN_INDEX,
- isolate()->factory()->Set_string(), JSSet::kTableOffset);
+ GenerateConstructor(kSet, isolate()->factory()->Set_string());
}
Node* CollectionsBuiltinsAssembler::CallGetOrCreateHashRaw(Node* const key) {
@@ -1049,9 +1185,9 @@ std::tuple<Node*, Node*> CollectionsBuiltinsAssembler::Transition(
GotoIf(TaggedIsSmi(next_table), &done_loop);
var_table.Bind(next_table);
- var_index.Bind(
- SmiUntag(CallBuiltin(Builtins::kOrderedHashTableHealIndex,
- NoContextConstant(), table, SmiTag(index))));
+ var_index.Bind(SmiUntag(
+ CAST(CallBuiltin(Builtins::kOrderedHashTableHealIndex,
+ NoContextConstant(), table, SmiTag(index)))));
Goto(&loop);
}
BIND(&done_loop);
@@ -1624,7 +1760,8 @@ TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
Branch(InstanceTypeEqual(receiver_instance_type, JS_MAP_VALUE_ITERATOR_TYPE),
&if_receiver_valid, &if_receiver_invalid);
BIND(&if_receiver_invalid);
- ThrowIncompatibleMethodReceiver(context, kMethodName, receiver);
+ ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
+ StringConstant(kMethodName), receiver);
BIND(&if_receiver_valid);
// Check if the {receiver} is exhausted.
@@ -1837,7 +1974,8 @@ TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
InstanceTypeEqual(receiver_instance_type, JS_SET_KEY_VALUE_ITERATOR_TYPE),
&if_receiver_valid, &if_receiver_invalid);
BIND(&if_receiver_invalid);
- ThrowIncompatibleMethodReceiver(context, kMethodName, receiver);
+ ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
+ StringConstant(kMethodName), receiver);
BIND(&if_receiver_valid);
// Check if the {receiver} is exhausted.
@@ -2019,7 +2157,7 @@ void WeakCollectionsBuiltinsAssembler::AddEntry(
// See HashTableBase::ElementAdded().
StoreFixedArrayElement(table, ObjectHashTable::kNumberOfElementsIndex,
- SmiFromWord(number_of_elements), SKIP_WRITE_BARRIER);
+ SmiFromIntPtr(number_of_elements), SKIP_WRITE_BARRIER);
}
TNode<Object> WeakCollectionsBuiltinsAssembler::AllocateTable(
@@ -2043,7 +2181,7 @@ TNode<Object> WeakCollectionsBuiltinsAssembler::AllocateTable(
StoreFixedArrayElement(table, ObjectHashTable::kNumberOfDeletedElementsIndex,
SmiConstant(0), SKIP_WRITE_BARRIER);
StoreFixedArrayElement(table, ObjectHashTable::kCapacityIndex,
- SmiFromWord(capacity), SKIP_WRITE_BARRIER);
+ SmiFromIntPtr(capacity), SKIP_WRITE_BARRIER);
TNode<IntPtrT> start = KeyIndexFromEntry(IntPtrConstant(0));
FillFixedArrayWithValue(HOLEY_ELEMENTS, table, start, length,
@@ -2083,16 +2221,15 @@ TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::FindKeyIndex(
BIND(&loop);
TNode<IntPtrT> key_index;
{
- key_index = KeyIndexFromEntry(var_entry);
+ key_index = KeyIndexFromEntry(var_entry.value());
TNode<Object> entry_key = CAST(LoadFixedArrayElement(table, key_index));
key_compare(entry_key, &if_found);
// See HashTable::NextProbe().
Increment(&var_count);
- var_entry = WordAnd(IntPtrAdd(UncheckedCast<IntPtrT>(var_entry),
- UncheckedCast<IntPtrT>(var_count)),
- entry_mask);
+ var_entry =
+ WordAnd(IntPtrAdd(var_entry.value(), var_count.value()), entry_mask);
Goto(&loop);
}
@@ -2186,9 +2323,9 @@ void WeakCollectionsBuiltinsAssembler::RemoveEntry(
// See HashTableBase::ElementRemoved().
TNode<IntPtrT> number_of_deleted = LoadNumberOfDeleted(table, 1);
StoreFixedArrayElement(table, ObjectHashTable::kNumberOfElementsIndex,
- SmiFromWord(number_of_elements), SKIP_WRITE_BARRIER);
+ SmiFromIntPtr(number_of_elements), SKIP_WRITE_BARRIER);
StoreFixedArrayElement(table, ObjectHashTable::kNumberOfDeletedElementsIndex,
- SmiFromWord(number_of_deleted), SKIP_WRITE_BARRIER);
+ SmiFromIntPtr(number_of_deleted), SKIP_WRITE_BARRIER);
}
TNode<BoolT> WeakCollectionsBuiltinsAssembler::ShouldRehash(
@@ -2222,15 +2359,11 @@ TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::ValueIndexFromKeyIndex(
}
TF_BUILTIN(WeakMapConstructor, WeakCollectionsBuiltinsAssembler) {
- GenerateConstructor(kMap, Context::JS_WEAK_MAP_FUN_INDEX,
- isolate()->factory()->WeakMap_string(),
- JSWeakMap::kTableOffset);
+ GenerateConstructor(kWeakMap, isolate()->factory()->WeakMap_string());
}
TF_BUILTIN(WeakSetConstructor, WeakCollectionsBuiltinsAssembler) {
- GenerateConstructor(kSet, Context::JS_WEAK_SET_FUN_INDEX,
- isolate()->factory()->WeakSet_string(),
- JSWeakSet::kTableOffset);
+ GenerateConstructor(kWeakSet, isolate()->factory()->WeakSet_string());
}
TF_BUILTIN(WeakMapLookupHashIndex, WeakCollectionsBuiltinsAssembler) {
@@ -2342,8 +2475,8 @@ TF_BUILTIN(WeakCollectionSet, WeakCollectionsBuiltinsAssembler) {
TNode<IntPtrT> entry_mask = EntryMask(capacity);
TVARIABLE(IntPtrT, var_hash, LoadJSReceiverIdentityHash(key, &if_no_hash));
- TNode<IntPtrT> key_index =
- FindKeyIndexForKey(table, key, var_hash, entry_mask, &if_not_found);
+ TNode<IntPtrT> key_index = FindKeyIndexForKey(table, key, var_hash.value(),
+ entry_mask, &if_not_found);
StoreFixedArrayElement(table, ValueIndexFromKeyIndex(key_index), value);
Return(collection);
@@ -2365,14 +2498,14 @@ TF_BUILTIN(WeakCollectionSet, WeakCollectionsBuiltinsAssembler) {
&call_runtime);
TNode<IntPtrT> insertion_key_index =
- FindKeyIndexForInsertion(table, var_hash, entry_mask);
+ FindKeyIndexForInsertion(table, var_hash.value(), entry_mask);
AddEntry(table, insertion_key_index, key, value, number_of_elements);
Return(collection);
}
BIND(&call_runtime);
{
CallRuntime(Runtime::kWeakCollectionSet, context, collection, key, value,
- SmiTag(var_hash));
+ SmiTag(var_hash.value()));
Return(collection);
}
}
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index 5c3883a870..945fb4394b 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -55,21 +55,54 @@ TF_BUILTIN(ConstructWithSpread, CallOrConstructBuiltinsAssembler) {
typedef compiler::Node Node;
-Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
- Node* feedback_vector,
- Node* slot,
- Node* context) {
- Isolate* isolate = this->isolate();
- Factory* factory = isolate->factory();
- IncrementCounter(isolate->counters()->fast_new_closure_total(), 1);
-
- Node* compiler_hints =
- LoadObjectField(shared_info, SharedFunctionInfo::kCompilerHintsOffset,
- MachineType::Uint32());
+Node* ConstructorBuiltinsAssembler::NotHasBoilerplate(Node* literal_site) {
+ return TaggedIsSmi(literal_site);
+}
+
+Node* ConstructorBuiltinsAssembler::LoadAllocationSiteBoilerplate(Node* site) {
+ CSA_ASSERT(this, IsAllocationSite(site));
+ return LoadObjectField(site,
+ AllocationSite::kTransitionInfoOrBoilerplateOffset);
+}
+
+TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
+ Node* shared_function_info = Parameter(Descriptor::kSharedFunctionInfo);
+ Node* feedback_cell = Parameter(Descriptor::kFeedbackCell);
+ Node* context = Parameter(Descriptor::kContext);
+
+ CSA_ASSERT(this, IsFeedbackCell(feedback_cell));
+ CSA_ASSERT(this, IsSharedFunctionInfo(shared_function_info));
+
+ IncrementCounter(isolate()->counters()->fast_new_closure_total(), 1);
+
+  // Bump the closure counter encoded in the {feedback_cell}s map.
+ {
+ Node* const feedback_cell_map = LoadMap(feedback_cell);
+ Label no_closures(this), one_closure(this), cell_done(this);
+
+ GotoIf(IsNoClosuresCellMap(feedback_cell_map), &no_closures);
+ GotoIf(IsOneClosureCellMap(feedback_cell_map), &one_closure);
+ CSA_ASSERT(this, IsManyClosuresCellMap(feedback_cell_map),
+ feedback_cell_map, feedback_cell);
+ Goto(&cell_done);
+
+ BIND(&no_closures);
+ StoreMapNoWriteBarrier(feedback_cell, Heap::kOneClosureCellMapRootIndex);
+ Goto(&cell_done);
+
+ BIND(&one_closure);
+ StoreMapNoWriteBarrier(feedback_cell, Heap::kManyClosuresCellMapRootIndex);
+ Goto(&cell_done);
+
+ BIND(&cell_done);
+ }
// The calculation of |function_map_index| must be in sync with
// SharedFunctionInfo::function_map_index().
- Node* function_map_index =
+ Node* const compiler_hints = LoadObjectField(
+ shared_function_info, SharedFunctionInfo::kCompilerHintsOffset,
+ MachineType::Uint32());
+ Node* const function_map_index =
IntPtrAdd(DecodeWordFromWord32<SharedFunctionInfo::FunctionMapIndexBits>(
compiler_hints),
IntPtrConstant(Context::FIRST_FUNCTION_MAP_INDEX));
@@ -79,24 +112,24 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
// Get the function map in the current native context and set that
// as the map of the allocated object.
- Node* native_context = LoadNativeContext(context);
- Node* function_map = LoadContextElement(native_context, function_map_index);
+ Node* const native_context = LoadNativeContext(context);
+ Node* const function_map =
+ LoadContextElement(native_context, function_map_index);
// Create a new closure from the given function info in new space
Node* instance_size_in_bytes =
TimesPointerSize(LoadMapInstanceSizeInWords(function_map));
- Node* result = Allocate(instance_size_in_bytes);
+ Node* const result = Allocate(instance_size_in_bytes);
StoreMapNoWriteBarrier(result, function_map);
InitializeJSObjectBodyNoSlackTracking(result, function_map,
instance_size_in_bytes,
JSFunction::kSizeWithoutPrototype);
// Initialize the rest of the function.
- Node* empty_fixed_array = HeapConstant(factory->empty_fixed_array());
- StoreObjectFieldNoWriteBarrier(result, JSObject::kPropertiesOrHashOffset,
- empty_fixed_array);
- StoreObjectFieldNoWriteBarrier(result, JSObject::kElementsOffset,
- empty_fixed_array);
+ StoreObjectFieldRoot(result, JSObject::kPropertiesOrHashOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldRoot(result, JSObject::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
{
// Set function prototype if necessary.
Label done(this), init_prototype(this);
@@ -104,65 +137,23 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
&done);
BIND(&init_prototype);
- StoreObjectFieldNoWriteBarrier(
- result, JSFunction::kPrototypeOrInitialMapOffset, TheHoleConstant());
+ StoreObjectFieldRoot(result, JSFunction::kPrototypeOrInitialMapOffset,
+ Heap::kTheHoleValueRootIndex);
Goto(&done);
-
BIND(&done);
}
- Node* literals_cell = LoadFeedbackVectorSlot(
- feedback_vector, slot, 0, CodeStubAssembler::SMI_PARAMETERS);
- {
- // Bump the closure counter encoded in the cell's map.
- Node* cell_map = LoadMap(literals_cell);
- Label no_closures(this), one_closure(this), cell_done(this);
-
- GotoIf(IsNoClosuresCellMap(cell_map), &no_closures);
- GotoIf(IsOneClosureCellMap(cell_map), &one_closure);
- CSA_ASSERT(this, IsManyClosuresCellMap(cell_map), cell_map, literals_cell,
- feedback_vector, slot);
- Goto(&cell_done);
-
- BIND(&no_closures);
- StoreMapNoWriteBarrier(literals_cell, Heap::kOneClosureCellMapRootIndex);
- Goto(&cell_done);
-
- BIND(&one_closure);
- StoreMapNoWriteBarrier(literals_cell, Heap::kManyClosuresCellMapRootIndex);
- Goto(&cell_done);
-
- BIND(&cell_done);
- }
STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
- StoreObjectFieldNoWriteBarrier(result, JSFunction::kFeedbackVectorOffset,
- literals_cell);
+ StoreObjectFieldNoWriteBarrier(result, JSFunction::kFeedbackCellOffset,
+ feedback_cell);
StoreObjectFieldNoWriteBarrier(result, JSFunction::kSharedFunctionInfoOffset,
- shared_info);
+ shared_function_info);
StoreObjectFieldNoWriteBarrier(result, JSFunction::kContextOffset, context);
Handle<Code> lazy_builtin_handle(
- isolate->builtins()->builtin(Builtins::kCompileLazy));
+ isolate()->builtins()->builtin(Builtins::kCompileLazy));
Node* lazy_builtin = HeapConstant(lazy_builtin_handle);
StoreObjectFieldNoWriteBarrier(result, JSFunction::kCodeOffset, lazy_builtin);
- return result;
-}
-
-Node* ConstructorBuiltinsAssembler::NotHasBoilerplate(Node* literal_site) {
- return TaggedIsSmi(literal_site);
-}
-
-Node* ConstructorBuiltinsAssembler::LoadAllocationSiteBoilerplate(Node* site) {
- CSA_ASSERT(this, IsAllocationSite(site));
- return LoadObjectField(site,
- AllocationSite::kTransitionInfoOrBoilerplateOffset);
-}
-
-TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
- Node* shared = Parameter(FastNewClosureDescriptor::kSharedFunctionInfo);
- Node* context = Parameter(FastNewClosureDescriptor::kContext);
- Node* vector = Parameter(FastNewClosureDescriptor::kVector);
- Node* slot = Parameter(FastNewClosureDescriptor::kSlot);
- Return(EmitFastNewClosure(shared, vector, slot, context));
+ Return(result);
}
TF_BUILTIN(FastNewObject, ConstructorBuiltinsAssembler) {
@@ -418,7 +409,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral(
BIND(&create_empty_array);
CSA_ASSERT(this, IsAllocationSite(allocation_site.value()));
- Node* kind = SmiToWord32(CAST(
+ Node* kind = SmiToInt32(CAST(
LoadObjectField(allocation_site.value(),
AllocationSite::kTransitionInfoOrBoilerplateOffset)));
CSA_ASSERT(this, IsFastElementsKind(kind));
@@ -662,7 +653,7 @@ TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) {
args.PopAndReturn(EmitCreateEmptyObjectLiteral(context));
BIND(&return_to_object);
- args.PopAndReturn(CallBuiltin(Builtins::kToObject, context, value));
+ args.PopAndReturn(ToObject(context, value));
}
TF_BUILTIN(ObjectConstructor_ConstructStub, ConstructorBuiltinsAssembler) {
@@ -687,7 +678,7 @@ TF_BUILTIN(ObjectConstructor_ConstructStub, ConstructorBuiltinsAssembler) {
args.PopAndReturn(EmitFastNewObject(context, target, new_target));
BIND(&return_to_object);
- args.PopAndReturn(CallBuiltin(Builtins::kToObject, context, value));
+ args.PopAndReturn(ToObject(context, value));
}
TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) {
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.h b/deps/v8/src/builtins/builtins-constructor-gen.h
index ac13dcbb6d..f6d71882bc 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.h
+++ b/deps/v8/src/builtins/builtins-constructor-gen.h
@@ -15,8 +15,6 @@ class ConstructorBuiltinsAssembler : public CodeStubAssembler {
explicit ConstructorBuiltinsAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
- Node* EmitFastNewClosure(Node* shared_info, Node* feedback_vector, Node* slot,
- Node* context);
Node* EmitFastNewFunctionContext(Node* closure, Node* slots, Node* context,
ScopeType scope_type);
diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc
index 98e0f2c8b2..dc3e8d53c4 100644
--- a/deps/v8/src/builtins/builtins-conversion-gen.cc
+++ b/deps/v8/src/builtins/builtins-conversion-gen.cc
@@ -62,7 +62,7 @@ void ConversionBuiltinsAssembler::Generate_NonPrimitiveToPrimitive(
BIND(&if_resultisnotprimitive);
{
// Somehow the @@toPrimitive method on {input} didn't yield a primitive.
- TailCallRuntime(Runtime::kThrowCannotConvertToPrimitive, context);
+ ThrowTypeError(context, MessageTemplate::kCannotConvertToPrimitive);
}
}
@@ -99,7 +99,7 @@ TF_BUILTIN(NonPrimitiveToPrimitive_String, ConversionBuiltinsAssembler) {
}
TF_BUILTIN(StringToNumber, CodeStubAssembler) {
- Node* input = Parameter(Descriptor::kArgument);
+ TNode<String> input = CAST(Parameter(Descriptor::kArgument));
Return(StringToNumber(input));
}
@@ -144,7 +144,7 @@ TF_BUILTIN(ToNumber, CodeStubAssembler) {
// ES section #sec-tostring-applied-to-the-number-type
TF_BUILTIN(NumberToString, CodeStubAssembler) {
- Node* input = Parameter(Descriptor::kArgument);
+ TNode<Number> input = CAST(Parameter(Descriptor::kArgument));
Return(NumberToString(input));
}
@@ -208,7 +208,7 @@ void ConversionBuiltinsAssembler::Generate_OrdinaryToPrimitive(
BIND(&if_methodisnotcallable);
}
- TailCallRuntime(Runtime::kThrowCannotConvertToPrimitive, context);
+ ThrowTypeError(context, MessageTemplate::kCannotConvertToPrimitive);
BIND(&return_result);
Return(var_result.value());
@@ -383,20 +383,13 @@ TF_BUILTIN(ToObject, CodeStubAssembler) {
Return(js_value);
BIND(&if_noconstructor);
- TailCallRuntime(Runtime::kThrowUndefinedOrNullToObject, context,
- StringConstant("ToObject"));
+ ThrowTypeError(context, MessageTemplate::kUndefinedOrNullToObject,
+ "ToObject");
BIND(&if_jsreceiver);
Return(object);
}
-// Deprecated ES5 [[Class]] internal property (used to implement %_ClassOf).
-TF_BUILTIN(ClassOf, CodeStubAssembler) {
- Node* object = Parameter(TypeofDescriptor::kObject);
-
- Return(ClassOf(object));
-}
-
// ES6 section 12.5.5 typeof operator
TF_BUILTIN(Typeof, CodeStubAssembler) {
Node* object = Parameter(TypeofDescriptor::kObject);
diff --git a/deps/v8/src/builtins/builtins-dataview.cc b/deps/v8/src/builtins/builtins-dataview.cc
index df7058d377..38b3d90649 100644
--- a/deps/v8/src/builtins/builtins-dataview.cc
+++ b/deps/v8/src/builtins/builtins-dataview.cc
@@ -157,6 +157,21 @@ void FlipBytes(uint8_t* target, uint8_t const* source) {
}
}
+template <typename T>
+MaybeHandle<Object> AllocateResult(Isolate* isolate, T value) {
+ return isolate->factory()->NewNumber(value);
+}
+
+template <>
+MaybeHandle<Object> AllocateResult(Isolate* isolate, int64_t value) {
+ return BigInt::FromInt64(isolate, value);
+}
+
+template <>
+MaybeHandle<Object> AllocateResult(Isolate* isolate, uint64_t value) {
+ return BigInt::FromUint64(isolate, value);
+}
+
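AllocateResult keeps GetViewValue type-generic: the primary template boxes the raw value as a Number, while the int64_t/uint64_t specializations produce BigInts for the new getBigInt64/getBigUint64 accessors. A minimal sketch, assuming an isolate in scope (illustrative only, not part of this patch):

  // Non-64-bit element types come back as Numbers, 64-bit ones as BigInts.
  Handle<Object> number_result =
      AllocateResult<double>(isolate, 1.5).ToHandleChecked();
  Handle<Object> bigint_result =
      AllocateResult<int64_t>(isolate, int64_t{-42}).ToHandleChecked();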
// ES6 section 24.2.1.1 GetViewValue (view, requestIndex, isLittleEndian, type)
template <typename T>
MaybeHandle<Object> GetViewValue(Isolate* isolate, Handle<JSDataView> data_view,
@@ -196,50 +211,78 @@ MaybeHandle<Object> GetViewValue(Isolate* isolate, Handle<JSDataView> data_view,
} else {
CopyBytes<sizeof(T)>(v.bytes, source);
}
- return isolate->factory()->NewNumber(v.data);
+ return AllocateResult<T>(isolate, v.data);
+}
+
+template <typename T>
+MaybeHandle<Object> DataViewConvertInput(Isolate* isolate,
+ Handle<Object> input) {
+ return Object::ToNumber(input);
+}
+
+template <>
+MaybeHandle<Object> DataViewConvertInput<int64_t>(Isolate* isolate,
+ Handle<Object> input) {
+ return BigInt::FromObject(isolate, input);
+}
+
+template <>
+MaybeHandle<Object> DataViewConvertInput<uint64_t>(Isolate* isolate,
+ Handle<Object> input) {
+ return BigInt::FromObject(isolate, input);
}
template <typename T>
-T DataViewConvertValue(double value);
+T DataViewConvertValue(Handle<Object> value);
+
+template <>
+int8_t DataViewConvertValue<int8_t>(Handle<Object> value) {
+ return static_cast<int8_t>(DoubleToInt32(value->Number()));
+}
+
+template <>
+int16_t DataViewConvertValue<int16_t>(Handle<Object> value) {
+ return static_cast<int16_t>(DoubleToInt32(value->Number()));
+}
template <>
-int8_t DataViewConvertValue<int8_t>(double value) {
- return static_cast<int8_t>(DoubleToInt32(value));
+int32_t DataViewConvertValue<int32_t>(Handle<Object> value) {
+ return DoubleToInt32(value->Number());
}
template <>
-int16_t DataViewConvertValue<int16_t>(double value) {
- return static_cast<int16_t>(DoubleToInt32(value));
+uint8_t DataViewConvertValue<uint8_t>(Handle<Object> value) {
+ return static_cast<uint8_t>(DoubleToUint32(value->Number()));
}
template <>
-int32_t DataViewConvertValue<int32_t>(double value) {
- return DoubleToInt32(value);
+uint16_t DataViewConvertValue<uint16_t>(Handle<Object> value) {
+ return static_cast<uint16_t>(DoubleToUint32(value->Number()));
}
template <>
-uint8_t DataViewConvertValue<uint8_t>(double value) {
- return static_cast<uint8_t>(DoubleToUint32(value));
+uint32_t DataViewConvertValue<uint32_t>(Handle<Object> value) {
+ return DoubleToUint32(value->Number());
}
template <>
-uint16_t DataViewConvertValue<uint16_t>(double value) {
- return static_cast<uint16_t>(DoubleToUint32(value));
+float DataViewConvertValue<float>(Handle<Object> value) {
+ return static_cast<float>(value->Number());
}
template <>
-uint32_t DataViewConvertValue<uint32_t>(double value) {
- return DoubleToUint32(value);
+double DataViewConvertValue<double>(Handle<Object> value) {
+ return value->Number();
}
template <>
-float DataViewConvertValue<float>(double value) {
- return static_cast<float>(value);
+int64_t DataViewConvertValue<int64_t>(Handle<Object> value) {
+ return BigInt::cast(*value)->AsInt64();
}
template <>
-double DataViewConvertValue<double>(double value) {
- return value;
+uint64_t DataViewConvertValue<uint64_t>(Handle<Object> value) {
+ return BigInt::cast(*value)->AsUint64();
}
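On the store side the dispatch happens in two stages: DataViewConvertInput picks ToNumber vs. BigInt::FromObject per element type, and DataViewConvertValue then truncates the converted object to the raw bits that SetViewValue writes into the buffer. A hedged sketch for a BigInt64 store, assuming it runs inside a MaybeHandle<Object>-returning helper with isolate and input in scope (not part of this patch):

  // Convert the JS argument to a BigInt, then extract the 64-bit payload.
  Handle<Object> converted;
  ASSIGN_RETURN_ON_EXCEPTION(isolate, converted,
                             DataViewConvertInput<int64_t>(isolate, input),
                             Object);
  int64_t raw_bits = DataViewConvertValue<int64_t>(converted);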
// ES6 section 24.2.1.2 SetViewValue (view, requestIndex, isLittleEndian, type,
@@ -253,7 +296,8 @@ MaybeHandle<Object> SetViewValue(Isolate* isolate, Handle<JSDataView> data_view,
Object::ToIndex(isolate, request_index,
MessageTemplate::kInvalidDataViewAccessorOffset),
Object);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, value, Object::ToNumber(value), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, value,
+ DataViewConvertInput<T>(isolate, value), Object);
size_t get_index = 0;
if (!TryNumberToSize(*request_index, &get_index)) {
THROW_NEW_ERROR(
@@ -274,7 +318,7 @@ MaybeHandle<Object> SetViewValue(Isolate* isolate, Handle<JSDataView> data_view,
T data;
uint8_t bytes[sizeof(T)];
} v;
- v.data = DataViewConvertValue<T>(value->Number());
+ v.data = DataViewConvertValue<T>(value);
size_t const buffer_offset = data_view_byte_offset + get_index;
DCHECK(NumberToSize(buffer->byte_length()) >= buffer_offset + sizeof(T));
uint8_t* const target =
@@ -310,6 +354,8 @@ DATA_VIEW_PROTOTYPE_GET(Int32, int32_t)
DATA_VIEW_PROTOTYPE_GET(Uint32, uint32_t)
DATA_VIEW_PROTOTYPE_GET(Float32, float)
DATA_VIEW_PROTOTYPE_GET(Float64, double)
+DATA_VIEW_PROTOTYPE_GET(BigInt64, int64_t)
+DATA_VIEW_PROTOTYPE_GET(BigUint64, uint64_t)
#undef DATA_VIEW_PROTOTYPE_GET
#define DATA_VIEW_PROTOTYPE_SET(Type, type) \
@@ -334,6 +380,8 @@ DATA_VIEW_PROTOTYPE_SET(Int32, int32_t)
DATA_VIEW_PROTOTYPE_SET(Uint32, uint32_t)
DATA_VIEW_PROTOTYPE_SET(Float32, float)
DATA_VIEW_PROTOTYPE_SET(Float64, double)
+DATA_VIEW_PROTOTYPE_SET(BigInt64, int64_t)
+DATA_VIEW_PROTOTYPE_SET(BigUint64, uint64_t)
#undef DATA_VIEW_PROTOTYPE_SET
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-date-gen.cc b/deps/v8/src/builtins/builtins-date-gen.cc
index f6f3563d55..8b58c1ec80 100644
--- a/deps/v8/src/builtins/builtins-date-gen.cc
+++ b/deps/v8/src/builtins/builtins-date-gen.cc
@@ -61,10 +61,7 @@ void DateBuiltinsAssembler::Generate_DatePrototype_GetField(Node* context,
// Raise a TypeError if the receiver is not a date.
BIND(&receiver_not_date);
- {
- CallRuntime(Runtime::kThrowNotDateError, context);
- Unreachable();
- }
+ { ThrowTypeError(context, MessageTemplate::kNotDateObject); }
}
TF_BUILTIN(DatePrototypeGetDate, DateBuiltinsAssembler) {
@@ -240,17 +237,14 @@ TF_BUILTIN(DatePrototypeToPrimitive, CodeStubAssembler) {
// Raise a TypeError if the {hint} is invalid.
BIND(&hint_is_invalid);
- {
- CallRuntime(Runtime::kThrowInvalidHint, context, hint);
- Unreachable();
- }
+ { ThrowTypeError(context, MessageTemplate::kInvalidHint, hint); }
// Raise a TypeError if the {receiver} is not a JSReceiver instance.
BIND(&receiver_is_invalid);
{
- CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
- StringConstant("Date.prototype [ @@toPrimitive ]"), receiver);
- Unreachable();
+ ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
+ StringConstant("Date.prototype [ @@toPrimitive ]"),
+ receiver);
}
}
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index a4a0bb9e2c..bf5b9086aa 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -70,7 +70,7 @@ namespace internal {
ASM(JSConstructStubGenericUnrestrictedReturn) \
ASM(JSBuiltinsConstructStub) \
TFC(FastNewObject, FastNewObject, 1) \
- TFC(FastNewClosure, FastNewClosure, 1) \
+ TFS(FastNewClosure, kSharedFunctionInfo, kFeedbackCell) \
TFC(FastNewFunctionContextEval, FastNewFunctionContext, 1) \
TFC(FastNewFunctionContextFunction, FastNewFunctionContext, 1) \
TFS(CreateRegExpLiteral, kFeedbackVector, kSlot, kPattern, kFlags) \
@@ -92,8 +92,8 @@ namespace internal {
\
/* String helpers */ \
TFC(StringCharAt, StringAt, 1) \
- TFC(StringCharCodeAt, StringAt, 1) \
- TFC(StringCodePointAt, StringAt, 1) \
+ TFC(StringCodePointAtUTF16, StringAt, 1) \
+ TFC(StringCodePointAtUTF32, StringAt, 1) \
TFC(StringEqual, Compare, 1) \
TFC(StringGreaterThan, Compare, 1) \
TFC(StringGreaterThanOrEqual, Compare, 1) \
@@ -101,7 +101,7 @@ namespace internal {
TFC(StringLessThan, Compare, 1) \
TFC(StringLessThanOrEqual, Compare, 1) \
TFS(StringRepeat, kString, kCount) \
- TFS(SubString, kString, kFrom, kTo) \
+ TFC(StringSubstring, StringSubstring, 1) \
\
/* OrderedHashTable helpers */ \
TFS(OrderedHashTableHealIndex, kTable, kIndex) \
@@ -193,7 +193,6 @@ namespace internal {
TFC(ToInteger, TypeConversion, 1) \
TFC(ToInteger_TruncateMinusZero, TypeConversion, 1) \
TFC(ToLength, TypeConversion, 1) \
- TFC(ClassOf, Typeof, 1) \
TFC(Typeof, Typeof, 1) \
TFC(GetSuperConstructor, Typeof, 1) \
\
@@ -216,14 +215,9 @@ namespace internal {
TFH(StoreGlobalIC_Slow, StoreWithVector) \
TFH(StoreIC_Uninitialized, StoreWithVector) \
\
- /* Promise helpers */ \
- TFS(ResolveNativePromise, kPromise, kValue) \
- TFS(RejectNativePromise, kPromise, kValue, kDebugEvent) \
- TFS(PerformNativePromiseThen, kPromise, kResolveReaction, kRejectReaction, \
- kResultPromise) \
+ /* Microtask helpers */ \
TFS(EnqueueMicrotask, kMicrotask) \
TFC(RunMicrotasks, RunMicrotasks, 1) \
- TFS(PromiseResolveThenableJob, kMicrotask) \
\
/* Object property helpers */ \
TFS(HasProperty, kKey, kObject) \
@@ -247,6 +241,10 @@ namespace internal {
CPP(ArrayConcat) \
/* ES6 #sec-array.isarray */ \
TFJ(ArrayIsArray, 1, kArg) \
+ /* ES6 #sec-array.from */ \
+ TFJ(ArrayFrom, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES6 #sec-array.of */ \
+ TFJ(ArrayOf, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES7 #sec-array.prototype.includes */ \
TFJ(ArrayIncludes, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.indexof */ \
@@ -313,6 +311,7 @@ namespace internal {
/* ES6 #sec-array.prototype.reduce */ \
TFS(ArrayReduceLoopContinuation, kReceiver, kCallbackFn, kThisArg, \
kAccumulator, kObject, kInitialK, kLength, kTo) \
+ TFJ(ArrayReducePreLoopEagerDeoptContinuation, 2, kCallbackFn, kLength) \
TFJ(ArrayReduceLoopEagerDeoptContinuation, 4, kCallbackFn, kInitialK, \
kLength, kAccumulator) \
TFJ(ArrayReduceLoopLazyDeoptContinuation, 4, kCallbackFn, kInitialK, \
@@ -321,6 +320,7 @@ namespace internal {
/* ES6 #sec-array.prototype.reduceRight */ \
TFS(ArrayReduceRightLoopContinuation, kReceiver, kCallbackFn, kThisArg, \
kAccumulator, kObject, kInitialK, kLength, kTo) \
+ TFJ(ArrayReduceRightPreLoopEagerDeoptContinuation, 2, kCallbackFn, kLength) \
TFJ(ArrayReduceRightLoopEagerDeoptContinuation, 4, kCallbackFn, kInitialK, \
kLength, kAccumulator) \
TFJ(ArrayReduceRightLoopLazyDeoptContinuation, 4, kCallbackFn, kInitialK, \
@@ -365,17 +365,16 @@ namespace internal {
CPP(ArrayBufferPrototypeSlice) \
\
/* AsyncFunction */ \
- TFJ(AsyncFunctionAwaitCaught, 3, kGenerator, kAwaited, kOuterPromise) \
- TFJ(AsyncFunctionAwaitUncaught, 3, kGenerator, kAwaited, kOuterPromise) \
- TFJ(AsyncFunctionAwaitRejectClosure, 1, kSentError) \
- TFJ(AsyncFunctionAwaitResolveClosure, 1, kSentValue) \
+ TFC(AsyncFunctionAwaitFulfill, PromiseReactionHandler, 1) \
+ TFC(AsyncFunctionAwaitReject, PromiseReactionHandler, 1) \
+ TFS(AsyncFunctionAwaitCaught, kGenerator, kValue, kOuterPromise) \
+ TFS(AsyncFunctionAwaitUncaught, kGenerator, kValue, kOuterPromise) \
TFJ(AsyncFunctionPromiseCreate, 0) \
TFJ(AsyncFunctionPromiseRelease, 1, kPromise) \
\
/* BigInt */ \
CPP(BigIntConstructor) \
CPP(BigIntConstructor_ConstructStub) \
- CPP(BigIntParseInt) \
CPP(BigIntAsUintN) \
CPP(BigIntAsIntN) \
CPP(BigIntPrototypeToLocaleString) \
@@ -457,6 +456,10 @@ namespace internal {
CPP(DataViewPrototypeSetFloat32) \
CPP(DataViewPrototypeGetFloat64) \
CPP(DataViewPrototypeSetFloat64) \
+ CPP(DataViewPrototypeGetBigInt64) \
+ CPP(DataViewPrototypeSetBigInt64) \
+ CPP(DataViewPrototypeGetBigUint64) \
+ CPP(DataViewPrototypeSetBigUint64) \
\
/* Date */ \
CPP(DateConstructor) \
@@ -755,7 +758,7 @@ namespace internal {
CPP(ObjectDefineProperties) \
CPP(ObjectDefineProperty) \
CPP(ObjectDefineSetter) \
- CPP(ObjectEntries) \
+ TFJ(ObjectEntries, 1, kObject) \
CPP(ObjectFreeze) \
TFJ(ObjectGetOwnPropertyDescriptor, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
@@ -785,7 +788,7 @@ namespace internal {
/* ES #sec-object.prototype.tolocalestring */ \
TFJ(ObjectPrototypeToLocaleString, 0) \
CPP(ObjectSeal) \
- CPP(ObjectValues) \
+ TFJ(ObjectValues, 1, kObject) \
\
/* instanceof */ \
TFC(OrdinaryHasInstance, Compare, 1) \
@@ -796,36 +799,42 @@ namespace internal {
TFS(ForInFilter, kKey, kObject) \
\
/* Promise */ \
+ /* ES #sec-fulfillpromise */ \
+ TFS(FulfillPromise, kPromise, kValue) \
+ /* ES #sec-rejectpromise */ \
+ TFS(RejectPromise, kPromise, kReason, kDebugEvent) \
+ /* ES #sec-promise-resolve-functions */ \
+ /* Starting at step 6 of "Promise Resolve Functions" */ \
+ TFS(ResolvePromise, kPromise, kResolution) \
+ /* ES #sec-promise-reject-functions */ \
+ TFJ(PromiseCapabilityDefaultReject, 1, kReason) \
+ /* ES #sec-promise-resolve-functions */ \
+ TFJ(PromiseCapabilityDefaultResolve, 1, kResolution) \
/* ES6 #sec-getcapabilitiesexecutor-functions */ \
TFJ(PromiseGetCapabilitiesExecutor, 2, kResolve, kReject) \
/* ES6 #sec-newpromisecapability */ \
- TFJ(NewPromiseCapability, 2, kConstructor, kDebugEvent) \
+ TFS(NewPromiseCapability, kConstructor, kDebugEvent) \
+ TFJ(PromiseConstructorLazyDeoptContinuation, 2, kPromise, kResult) \
/* ES6 #sec-promise-executor */ \
TFJ(PromiseConstructor, 1, kExecutor) \
- TFJ(PromiseInternalConstructor, 1, kParent) \
CPP(IsPromise) \
- /* ES #sec-promise-resolve-functions */ \
- TFJ(PromiseResolveClosure, 1, kValue) \
- /* ES #sec-promise-reject-functions */ \
- TFJ(PromiseRejectClosure, 1, kValue) \
- TFJ(PromiseAllResolveElementClosure, 1, kValue) \
/* ES #sec-promise.prototype.then */ \
- TFJ(PromisePrototypeThen, 2, kOnFullfilled, kOnRejected) \
+ TFJ(PromisePrototypeThen, 2, kOnFulfilled, kOnRejected) \
+ /* ES #sec-performpromisethen */ \
+ TFS(PerformPromiseThen, kPromise, kOnFulfilled, kOnRejected, kResultPromise) \
/* ES #sec-promise.prototype.catch */ \
TFJ(PromisePrototypeCatch, 1, kOnRejected) \
- /* ES #sec-fulfillpromise */ \
- TFJ(ResolvePromise, 2, kPromise, kValue) \
- TFS(PromiseHandleReject, kPromise, kOnReject, kException) \
- TFS(PromiseHandle, kValue, kHandler, kDeferredPromise, kDeferredOnResolve, \
- kDeferredOnReject) \
- TFJ(PromiseHandleJS, 5, kValue, kHandler, kDeferredPromise, \
- kDeferredOnResolve, kDeferredOnReject) \
+ /* ES #sec-promisereactionjob */ \
+ TFS(PromiseRejectReactionJob, kReason, kHandler, kPayload) \
+ TFS(PromiseFulfillReactionJob, kValue, kHandler, kPayload) \
+ /* ES #sec-promiseresolvethenablejob */ \
+ TFS(PromiseResolveThenableJob, kPromiseToResolve, kThenable, kThen) \
/* ES #sec-promise.resolve */ \
- TFJ(PromiseResolveWrapper, 1, kValue) \
+ TFJ(PromiseResolveTrampoline, 1, kValue) \
+ /* ES #sec-promise-resolve */ \
TFS(PromiseResolve, kConstructor, kValue) \
/* ES #sec-promise.reject */ \
TFJ(PromiseReject, 1, kReason) \
- TFJ(InternalPromiseReject, 3, kPromise, kReason, kDebugEvent) \
TFJ(PromisePrototypeFinally, 1, kOnFinally) \
TFJ(PromiseThenFinally, 1, kValue) \
TFJ(PromiseCatchFinally, 1, kReason) \
@@ -833,8 +842,15 @@ namespace internal {
TFJ(PromiseThrowerFinally, 0) \
/* ES #sec-promise.all */ \
TFJ(PromiseAll, 1, kIterable) \
+ TFJ(PromiseAllResolveElementClosure, 1, kValue) \
/* ES #sec-promise.race */ \
TFJ(PromiseRace, 1, kIterable) \
+ /* V8 Extras: v8.createPromise(parent) */ \
+ TFJ(PromiseInternalConstructor, 1, kParent) \
+ /* V8 Extras: v8.rejectPromise(promise, reason) */ \
+ TFJ(PromiseInternalReject, 2, kPromise, kReason) \
+ /* V8 Extras: v8.resolvePromise(promise, resolution) */ \
+ TFJ(PromiseInternalResolve, 2, kPromise, kResolution) \
\
/* Proxy */ \
TFJ(ProxyConstructor, 0) \
@@ -1032,9 +1048,8 @@ namespace internal {
/* ES6 #sec-string.prototype.tostring */ \
TFJ(StringPrototypeToString, 0) \
TFJ(StringPrototypeTrim, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- TFJ(StringPrototypeTrimLeft, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- TFJ(StringPrototypeTrimRight, \
+ TFJ(StringPrototypeTrimEnd, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(StringPrototypeTrimStart, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.valueof */ \
TFJ(StringPrototypeValueOf, 0) \
@@ -1062,16 +1077,13 @@ namespace internal {
TFJ(SymbolPrototypeValueOf, 0) \
\
/* TypedArray */ \
+ TFS(IterableToList, kIterable, kIteratorFn) \
TFS(TypedArrayInitialize, kHolder, kLength, kElementSize, kInitialize) \
TFS(TypedArrayInitializeWithBuffer, kHolder, kLength, kBuffer, kElementSize, \
kByteOffset) \
- /* ES6 #sec-typedarray-buffer-byteoffset-length */ \
- TFJ(TypedArrayConstructByArrayBuffer, 5, kHolder, kBuffer, kByteOffset, \
- kLength, kElementSize) \
- TFJ(TypedArrayConstructByArrayLike, 4, kHolder, kArrayLike, kLength, \
- kElementSize) \
- /* ES6 #sec-typedarray-length */ \
- TFJ(TypedArrayConstructByLength, 3, kHolder, kLength, kElementSize) \
+ TFJ(TypedArrayConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(TypedArrayConstructor_ConstructStub, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
CPP(TypedArrayPrototypeBuffer) \
/* ES6 #sec-get-%typedarray%.prototype.bytelength */ \
TFJ(TypedArrayPrototypeByteLength, 0) \
@@ -1089,6 +1101,9 @@ namespace internal {
CPP(TypedArrayPrototypeCopyWithin) \
/* ES6 #sec-%typedarray%.prototype.fill */ \
CPP(TypedArrayPrototypeFill) \
+ /* ES6 #sec-%typedarray%.prototype.filter */ \
+ TFJ(TypedArrayPrototypeFilter, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 %TypedArray%.prototype.find */ \
TFJ(TypedArrayPrototypeFind, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
@@ -1106,7 +1121,11 @@ namespace internal {
/* ES6 %TypedArray%.prototype.set */ \
TFJ(TypedArrayPrototypeSet, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-%typedarray%.prototype.slice */ \
- CPP(TypedArrayPrototypeSlice) \
+ TFJ(TypedArrayPrototypeSlice, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES6 %TypedArray%.prototype.subarray */ \
+ TFJ(TypedArrayPrototypeSubArray, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-get-%typedarray%.prototype-@@tostringtag */ \
TFJ(TypedArrayPrototypeToStringTag, 0) \
/* ES6 %TypedArray%.prototype.every */ \
@@ -1126,6 +1145,10 @@ namespace internal {
/* ES6 %TypedArray%.prototype.forEach */ \
TFJ(TypedArrayPrototypeForEach, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES6 %TypedArray%.of */ \
+ TFJ(TypedArrayOf, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES6 %TypedArray%.from */ \
+ TFJ(TypedArrayFrom, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
\
/* Wasm */ \
ASM(WasmCompileLazy) \
@@ -1159,6 +1182,17 @@ namespace internal {
\
/* AsyncGenerator */ \
\
+ /* Await (proposal-async-iteration/#await), with resume behaviour */ \
+ /* specific to Async Generators. Internal / Not exposed to JS code. */ \
+ TFS(AsyncGeneratorAwaitCaught, kGenerator, kValue) \
+ TFS(AsyncGeneratorAwaitUncaught, kGenerator, kValue) \
+ TFC(AsyncGeneratorAwaitFulfill, PromiseReactionHandler, 1) \
+ TFC(AsyncGeneratorAwaitReject, PromiseReactionHandler, 1) \
+ TFC(AsyncGeneratorYieldFulfill, PromiseReactionHandler, 1) \
+ TFC(AsyncGeneratorReturnClosedFulfill, PromiseReactionHandler, 1) \
+ TFC(AsyncGeneratorReturnClosedReject, PromiseReactionHandler, 1) \
+ TFC(AsyncGeneratorReturnFulfill, PromiseReactionHandler, 1) \
+ \
TFS(AsyncGeneratorResolve, kGenerator, kValue, kDone) \
TFS(AsyncGeneratorReject, kGenerator, kValue) \
TFS(AsyncGeneratorYield, kGenerator, kValue, kIsCaught) \
@@ -1181,17 +1215,6 @@ namespace internal {
TFJ(AsyncGeneratorPrototypeThrow, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
\
- /* Await (proposal-async-iteration/#await), with resume behaviour */ \
- /* specific to Async Generators. Internal / Not exposed to JS code. */ \
- TFJ(AsyncGeneratorAwaitCaught, 2, kGenerator, kAwaited) \
- TFJ(AsyncGeneratorAwaitUncaught, 2, kGenerator, kAwaited) \
- TFJ(AsyncGeneratorAwaitResolveClosure, 1, kValue) \
- TFJ(AsyncGeneratorAwaitRejectClosure, 1, kValue) \
- TFJ(AsyncGeneratorYieldResolveClosure, 1, kValue) \
- TFJ(AsyncGeneratorReturnClosedResolveClosure, 1, kValue) \
- TFJ(AsyncGeneratorReturnClosedRejectClosure, 1, kValue) \
- TFJ(AsyncGeneratorReturnResolveClosure, 1, kValue) \
- \
/* Async-from-Sync Iterator */ \
\
/* %AsyncFromSyncIteratorPrototype% */ \
@@ -1240,25 +1263,16 @@ namespace internal {
V(AsyncFromSyncIteratorPrototypeNext) \
V(AsyncFromSyncIteratorPrototypeReturn) \
V(AsyncFromSyncIteratorPrototypeThrow) \
- V(AsyncFunctionAwaitCaught) \
- V(AsyncFunctionAwaitUncaught) \
V(AsyncGeneratorResolve) \
- V(AsyncGeneratorAwaitCaught) \
- V(AsyncGeneratorAwaitUncaught) \
- V(PerformNativePromiseThen) \
V(PromiseAll) \
V(PromiseConstructor) \
- V(PromiseHandle) \
+ V(PromiseFulfillReactionJob) \
V(PromiseRace) \
- V(PromiseResolve) \
- V(PromiseResolveClosure) \
- V(RejectNativePromise) \
- V(ResolveNativePromise) \
V(ResolvePromise)
// The exceptions thrown in the following builtins are caught internally and will
// not be propagated further or re-thrown
-#define BUILTIN_EXCEPTION_CAUGHT_PREDICTION_LIST(V) V(PromiseHandleReject)
+#define BUILTIN_EXCEPTION_CAUGHT_PREDICTION_LIST(V) V(PromiseRejectReactionJob)
#define IGNORE_BUILTIN(...)
diff --git a/deps/v8/src/builtins/builtins-function.cc b/deps/v8/src/builtins/builtins-function.cc
index 771c7243ac..cc6d237af6 100644
--- a/deps/v8/src/builtins/builtins-function.cc
+++ b/deps/v8/src/builtins/builtins-function.cc
@@ -288,26 +288,22 @@ Object* DoFunctionBind(Isolate* isolate, BuiltinArguments args) {
// ES6 section 19.2.3.2 Function.prototype.bind ( thisArg, ...args )
BUILTIN(FunctionPrototypeBind) { return DoFunctionBind(isolate, args); }
-// TODO(verwaest): This is a temporary helper until the FastFunctionBind stub
-// can tailcall to the builtin directly.
-RUNTIME_FUNCTION(Runtime_FunctionBind) {
- DCHECK_EQ(2, args.length());
- Arguments* incoming = reinterpret_cast<Arguments*>(args[0]);
- // Rewrap the arguments as builtins arguments.
- int argc = incoming->length() + BuiltinArguments::kNumExtraArgsWithReceiver;
- BuiltinArguments caller_args(argc, incoming->arguments() + 1);
- return DoFunctionBind(isolate, caller_args);
-}
-
// ES6 section 19.2.3.5 Function.prototype.toString ( )
BUILTIN(FunctionPrototypeToString) {
HandleScope scope(isolate);
Handle<Object> receiver = args.receiver();
if (receiver->IsJSBoundFunction()) {
return *JSBoundFunction::ToString(Handle<JSBoundFunction>::cast(receiver));
- } else if (receiver->IsJSFunction()) {
+ }
+ if (receiver->IsJSFunction()) {
return *JSFunction::ToString(Handle<JSFunction>::cast(receiver));
}
+ // With the revised toString behavior, all callable objects are valid
+ // receivers for this method.
+ if (FLAG_harmony_function_tostring && receiver->IsJSReceiver() &&
+ JSReceiver::cast(*receiver)->map()->is_callable()) {
+ return isolate->heap()->function_native_code_string();
+ }
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kNotGeneric,
isolate->factory()->NewStringFromAsciiChecked(
diff --git a/deps/v8/src/builtins/builtins-generator-gen.cc b/deps/v8/src/builtins/builtins-generator-gen.cc
index b063b314b5..07a56c86ed 100644
--- a/deps/v8/src/builtins/builtins-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-generator-gen.cc
@@ -84,9 +84,8 @@ void GeneratorBuiltinsAssembler::GeneratorPrototypeResume(
BIND(&if_receiverisincompatible);
{
// The {receiver} is not a valid JSGeneratorObject.
- CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
- StringConstant(method_name), receiver);
- Unreachable();
+ ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
+ StringConstant(method_name), receiver);
}
BIND(&if_receiverisclosed);
@@ -110,10 +109,7 @@ void GeneratorBuiltinsAssembler::GeneratorPrototypeResume(
}
BIND(&if_receiverisrunning);
- {
- CallRuntime(Runtime::kThrowGeneratorRunning, context);
- Unreachable();
- }
+ { ThrowTypeError(context, MessageTemplate::kGeneratorRunning); }
BIND(&if_exception);
{
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index bb4b66e3a4..edc529c798 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -73,8 +73,8 @@ TF_BUILTIN(GrowFastSmiOrObjectElements, CodeStubAssembler) {
TF_BUILTIN(NewArgumentsElements, CodeStubAssembler) {
Node* frame = Parameter(Descriptor::kFrame);
- Node* length = SmiToWord(Parameter(Descriptor::kLength));
- Node* mapped_count = SmiToWord(Parameter(Descriptor::kMappedCount));
+ Node* length = SmiToIntPtr(Parameter(Descriptor::kLength));
+ Node* mapped_count = SmiToIntPtr(Parameter(Descriptor::kMappedCount));
// Check if we can allocate in new space.
ElementsKind kind = PACKED_ELEMENTS;
@@ -164,8 +164,8 @@ TF_BUILTIN(NewArgumentsElements, CodeStubAssembler) {
{
// Allocate in old space (or large object space).
TailCallRuntime(Runtime::kNewArgumentsElements, NoContextConstant(),
- BitcastWordToTagged(frame), SmiFromWord(length),
- SmiFromWord(mapped_count));
+ BitcastWordToTagged(frame), SmiFromIntPtr(length),
+ SmiFromIntPtr(mapped_count));
}
}
@@ -202,7 +202,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
Node* mask;
GetMarkBit(object, &cell, &mask);
- mask = TruncateWordToWord32(mask);
+ mask = TruncateIntPtrToInt32(mask);
Node* bits = Load(MachineType::Int32(), cell);
Node* bit_0 = Word32And(bits, mask);
@@ -239,7 +239,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
Node* cell;
Node* mask;
GetMarkBit(object, &cell, &mask);
- mask = TruncateWordToWord32(mask);
+ mask = TruncateIntPtrToInt32(mask);
// Non-white has 1 for the first bit, so we only need to check for the first
// bit.
return Word32Equal(Word32And(Load(MachineType::Int32(), cell), mask),
@@ -628,6 +628,9 @@ class InternalBuiltinsAssembler : public CodeStubAssembler {
void EnterMicrotaskContext(TNode<Context> context);
void LeaveMicrotaskContext();
+ void RunPromiseHook(Runtime::FunctionId id, TNode<Context> context,
+ SloppyTNode<HeapObject> payload);
+
TNode<Object> GetPendingException() {
auto ref = ExternalReference(kPendingExceptionAddress, isolate());
return TNode<Object>::UncheckedCast(
@@ -745,6 +748,19 @@ void InternalBuiltinsAssembler::LeaveMicrotaskContext() {
}
}
+void InternalBuiltinsAssembler::RunPromiseHook(
+ Runtime::FunctionId id, TNode<Context> context,
+ SloppyTNode<HeapObject> payload) {
+ Label hook(this, Label::kDeferred), done_hook(this);
+ Branch(IsPromiseHookEnabledOrDebugIsActive(), &hook, &done_hook);
+ BIND(&hook);
+ {
+ CallRuntime(id, context, payload);
+ Goto(&done_hook);
+ }
+ BIND(&done_hook);
+}
+
TF_BUILTIN(EnqueueMicrotask, InternalBuiltinsAssembler) {
Node* microtask = Parameter(Descriptor::kMicrotask);
@@ -812,13 +828,15 @@ TF_BUILTIN(EnqueueMicrotask, InternalBuiltinsAssembler) {
}
TF_BUILTIN(RunMicrotasks, InternalBuiltinsAssembler) {
- Label init_queue_loop(this);
+ // Load the current context from the isolate.
+ TNode<Context> current_context = GetCurrentContext();
+ Label init_queue_loop(this);
Goto(&init_queue_loop);
BIND(&init_queue_loop);
{
TVARIABLE(IntPtrT, index, IntPtrConstant(0));
- Label loop(this, &index);
+ Label loop(this, &index), loop_next(this);
TNode<IntPtrT> num_tasks = GetPendingMicrotaskCount();
ReturnIf(IntPtrEqual(num_tasks, IntPtrConstant(0)), UndefinedConstant());
@@ -830,222 +848,193 @@ TF_BUILTIN(RunMicrotasks, InternalBuiltinsAssembler) {
CSA_ASSERT(this, IntPtrGreaterThan(num_tasks, IntPtrConstant(0)));
SetPendingMicrotaskCount(IntPtrConstant(0));
- SetMicrotaskQueue(
- TNode<FixedArray>::UncheckedCast(EmptyFixedArrayConstant()));
+ SetMicrotaskQueue(EmptyFixedArrayConstant());
Goto(&loop);
BIND(&loop);
{
- TNode<HeapObject> microtask =
- TNode<HeapObject>::UncheckedCast(LoadFixedArrayElement(queue, index));
- index = IntPtrAdd(index, IntPtrConstant(1));
+ TNode<HeapObject> microtask = TNode<HeapObject>::UncheckedCast(
+ LoadFixedArrayElement(queue, index.value()));
+ index = IntPtrAdd(index.value(), IntPtrConstant(1));
CSA_ASSERT(this, TaggedIsNotSmi(microtask));
TNode<Map> microtask_map = LoadMap(microtask);
TNode<Int32T> microtask_type = LoadMapInstanceType(microtask_map);
- Label is_call_handler_info(this);
- Label is_function(this);
- Label is_promise_resolve_thenable_job(this);
- Label is_promise_reaction_job(this);
- Label is_unreachable(this);
-
- int32_t case_values[] = {TUPLE3_TYPE, // CallHandlerInfo
- JS_FUNCTION_TYPE,
- PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE,
- PROMISE_REACTION_JOB_INFO_TYPE};
-
- Label* case_labels[] = {&is_call_handler_info, &is_function,
- &is_promise_resolve_thenable_job,
- &is_promise_reaction_job};
-
+ VARIABLE(var_exception, MachineRepresentation::kTagged,
+ TheHoleConstant());
+ Label if_exception(this, Label::kDeferred);
+ Label is_callable(this), is_callback(this),
+ is_promise_fulfill_reaction_job(this),
+ is_promise_reject_reaction_job(this),
+ is_promise_resolve_thenable_job(this),
+ is_unreachable(this, Label::kDeferred);
+
+ int32_t case_values[] = {CALLABLE_TASK_TYPE, CALLBACK_TASK_TYPE,
+ PROMISE_FULFILL_REACTION_JOB_TASK_TYPE,
+ PROMISE_REJECT_REACTION_JOB_TASK_TYPE,
+ PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE};
+ Label* case_labels[] = {
+ &is_callable, &is_callback, &is_promise_fulfill_reaction_job,
+ &is_promise_reject_reaction_job, &is_promise_resolve_thenable_job};
static_assert(arraysize(case_values) == arraysize(case_labels), "");
Switch(microtask_type, &is_unreachable, case_values, case_labels,
arraysize(case_labels));
- BIND(&is_call_handler_info);
+ BIND(&is_callable);
{
- // Bailout to C++ slow path for the remainder of the loop.
- auto index_ref =
- ExternalReference(kMicrotaskQueueBailoutIndexAddress, isolate());
- auto count_ref =
- ExternalReference(kMicrotaskQueueBailoutCountAddress, isolate());
- auto rep = kIntSize == 4 ? MachineRepresentation::kWord32
- : MachineRepresentation::kWord64;
-
- // index was pre-incremented, decrement for bailout to C++.
- Node* value = IntPtrSub(index, IntPtrConstant(1));
-
- if (kPointerSize == 4) {
- DCHECK_EQ(kIntSize, 4);
- StoreNoWriteBarrier(rep, ExternalConstant(index_ref), value);
- StoreNoWriteBarrier(rep, ExternalConstant(count_ref), num_tasks);
- } else {
- Node* count = num_tasks;
- if (kIntSize == 4) {
- value = TruncateInt64ToInt32(value);
- count = TruncateInt64ToInt32(count);
- }
- StoreNoWriteBarrier(rep, ExternalConstant(index_ref), value);
- StoreNoWriteBarrier(rep, ExternalConstant(count_ref), count);
- }
-
- Return(queue);
- }
+ // Enter the context of the {microtask}.
+ TNode<Context> microtask_context =
+ LoadObjectField<Context>(microtask, CallableTask::kContextOffset);
+ TNode<Context> native_context = LoadNativeContext(microtask_context);
- BIND(&is_function);
- {
- Label cont(this);
- VARIABLE(exception, MachineRepresentation::kTagged, TheHoleConstant());
- TNode<Context> old_context = GetCurrentContext();
- TNode<Context> fn_context = TNode<Context>::UncheckedCast(
- LoadObjectField(microtask, JSFunction::kContextOffset));
- TNode<Context> native_context =
- TNode<Context>::UncheckedCast(LoadNativeContext(fn_context));
+ CSA_ASSERT(this, IsNativeContext(native_context));
+ EnterMicrotaskContext(microtask_context);
SetCurrentContext(native_context);
- EnterMicrotaskContext(fn_context);
- Node* const call = CallJS(CodeFactory::Call(isolate()), native_context,
- microtask, UndefinedConstant());
- GotoIfException(call, &cont);
- Goto(&cont);
- BIND(&cont);
+
+ TNode<JSReceiver> callable = LoadObjectField<JSReceiver>(
+ microtask, CallableTask::kCallableOffset);
+ Node* const result = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ microtask_context, callable, UndefinedConstant());
+ GotoIfException(result, &if_exception, &var_exception);
LeaveMicrotaskContext();
- SetCurrentContext(old_context);
- Branch(IntPtrLessThan(index, num_tasks), &loop, &init_queue_loop);
+ SetCurrentContext(current_context);
+ Goto(&loop_next);
+ }
+
+ BIND(&is_callback);
+ {
+ Node* const microtask_callback =
+ LoadObjectField(microtask, CallbackTask::kCallbackOffset);
+ Node* const microtask_data =
+ LoadObjectField(microtask, CallbackTask::kDataOffset);
+
+ // If this turns out to become a bottleneck because of the calls
+ // to C++ via CEntryStub, we can choose to speed them up using a
+ // similar mechanism that we use for the CallApiFunction stub,
+ // except that calling the MicrotaskCallback is even easier, since
+ // it doesn't accept any tagged parameters, doesn't return a value
+ // and ignores exceptions.
+ //
+ // But from our current measurements it doesn't seem to be a
+      // serious performance problem, even if the microtask queue is full
+      // of CallHandlerTasks (which is not a realistic use case anyway).
+ CallRuntime(Runtime::kRunMicrotaskCallback, current_context,
+ microtask_callback, microtask_data);
+ Goto(&loop_next);
}
BIND(&is_promise_resolve_thenable_job);
{
- VARIABLE(exception, MachineRepresentation::kTagged, TheHoleConstant());
- TNode<Context> old_context = GetCurrentContext();
- TNode<Context> microtask_context =
- TNode<Context>::UncheckedCast(LoadObjectField(
- microtask, PromiseResolveThenableJobInfo::kContextOffset));
- TNode<Context> native_context =
- TNode<Context>::UncheckedCast(LoadNativeContext(microtask_context));
+ // Enter the context of the {microtask}.
+ TNode<Context> microtask_context = LoadObjectField<Context>(
+ microtask, PromiseResolveThenableJobTask::kContextOffset);
+ TNode<Context> native_context = LoadNativeContext(microtask_context);
+ CSA_ASSERT(this, IsNativeContext(native_context));
+ EnterMicrotaskContext(microtask_context);
SetCurrentContext(native_context);
+
+ Node* const promise_to_resolve = LoadObjectField(
+ microtask, PromiseResolveThenableJobTask::kPromiseToResolveOffset);
+ Node* const then = LoadObjectField(
+ microtask, PromiseResolveThenableJobTask::kThenOffset);
+ Node* const thenable = LoadObjectField(
+ microtask, PromiseResolveThenableJobTask::kThenableOffset);
+
+ Node* const result =
+ CallBuiltin(Builtins::kPromiseResolveThenableJob, native_context,
+ promise_to_resolve, thenable, then);
+ GotoIfException(result, &if_exception, &var_exception);
+ LeaveMicrotaskContext();
+ SetCurrentContext(current_context);
+ Goto(&loop_next);
+ }
+
+ BIND(&is_promise_fulfill_reaction_job);
+ {
+ // Enter the context of the {microtask}.
+ TNode<Context> microtask_context = LoadObjectField<Context>(
+ microtask, PromiseReactionJobTask::kContextOffset);
+ TNode<Context> native_context = LoadNativeContext(microtask_context);
+ CSA_ASSERT(this, IsNativeContext(native_context));
EnterMicrotaskContext(microtask_context);
+ SetCurrentContext(native_context);
- Label if_unhandled_exception(this), done(this);
- Node* const ret = CallBuiltin(Builtins::kPromiseResolveThenableJob,
- native_context, microtask);
- GotoIfException(ret, &if_unhandled_exception, &exception);
- Goto(&done);
+ Node* const argument =
+ LoadObjectField(microtask, PromiseReactionJobTask::kArgumentOffset);
+ Node* const handler =
+ LoadObjectField(microtask, PromiseReactionJobTask::kHandlerOffset);
+ Node* const payload =
+ LoadObjectField(microtask, PromiseReactionJobTask::kPayloadOffset);
- BIND(&if_unhandled_exception);
- CallRuntime(Runtime::kReportMessage, native_context, exception.value());
- Goto(&done);
+ // Run the promise before/debug hook if enabled.
+ RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context, payload);
- BIND(&done);
- LeaveMicrotaskContext();
- SetCurrentContext(old_context);
+ Node* const result =
+ CallBuiltin(Builtins::kPromiseFulfillReactionJob, microtask_context,
+ argument, handler, payload);
+ GotoIfException(result, &if_exception, &var_exception);
+
+ // Run the promise after/debug hook if enabled.
+ RunPromiseHook(Runtime::kPromiseHookAfter, microtask_context, payload);
- Branch(IntPtrLessThan(index, num_tasks), &loop, &init_queue_loop);
+ LeaveMicrotaskContext();
+ SetCurrentContext(current_context);
+ Goto(&loop_next);
}
- BIND(&is_promise_reaction_job);
+ BIND(&is_promise_reject_reaction_job);
{
- Label if_multiple(this);
- Label if_single(this);
-
- Node* const value =
- LoadObjectField(microtask, PromiseReactionJobInfo::kValueOffset);
- Node* const tasks =
- LoadObjectField(microtask, PromiseReactionJobInfo::kTasksOffset);
- Node* const deferred_promises = LoadObjectField(
- microtask, PromiseReactionJobInfo::kDeferredPromiseOffset);
- Node* const deferred_on_resolves = LoadObjectField(
- microtask, PromiseReactionJobInfo::kDeferredOnResolveOffset);
- Node* const deferred_on_rejects = LoadObjectField(
- microtask, PromiseReactionJobInfo::kDeferredOnRejectOffset);
-
- TNode<Context> old_context = GetCurrentContext();
- TNode<Context> microtask_context = TNode<Context>::UncheckedCast(
- LoadObjectField(microtask, PromiseReactionJobInfo::kContextOffset));
- TNode<Context> native_context =
- TNode<Context>::UncheckedCast(LoadNativeContext(microtask_context));
- SetCurrentContext(native_context);
+ // Enter the context of the {microtask}.
+ TNode<Context> microtask_context = LoadObjectField<Context>(
+ microtask, PromiseReactionJobTask::kContextOffset);
+ TNode<Context> native_context = LoadNativeContext(microtask_context);
+ CSA_ASSERT(this, IsNativeContext(native_context));
EnterMicrotaskContext(microtask_context);
+ SetCurrentContext(native_context);
+
+ Node* const argument =
+ LoadObjectField(microtask, PromiseReactionJobTask::kArgumentOffset);
+ Node* const handler =
+ LoadObjectField(microtask, PromiseReactionJobTask::kHandlerOffset);
+ Node* const payload =
+ LoadObjectField(microtask, PromiseReactionJobTask::kPayloadOffset);
+
+ // Run the promise before/debug hook if enabled.
+ RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context, payload);
+
+ Node* const result =
+ CallBuiltin(Builtins::kPromiseRejectReactionJob, microtask_context,
+ argument, handler, payload);
+ GotoIfException(result, &if_exception, &var_exception);
+
+ // Run the promise after/debug hook if enabled.
+ RunPromiseHook(Runtime::kPromiseHookAfter, microtask_context, payload);
- Branch(IsFixedArray(deferred_promises), &if_multiple, &if_single);
-
- BIND(&if_single);
- {
- CallBuiltin(Builtins::kPromiseHandle, native_context, value, tasks,
- deferred_promises, deferred_on_resolves,
- deferred_on_rejects);
- LeaveMicrotaskContext();
- SetCurrentContext(old_context);
- Branch(IntPtrLessThan(index, num_tasks), &loop, &init_queue_loop);
- }
-
- BIND(&if_multiple);
- {
- TVARIABLE(IntPtrT, inner_index, IntPtrConstant(0));
- TNode<IntPtrT> inner_length =
- LoadAndUntagFixedArrayBaseLength(deferred_promises);
- Label inner_loop(this, &inner_index), done(this);
-
- CSA_ASSERT(this, IntPtrGreaterThan(inner_length, IntPtrConstant(0)));
- Goto(&inner_loop);
- BIND(&inner_loop);
- {
- Node* const task = LoadFixedArrayElement(tasks, inner_index);
- Node* const deferred_promise =
- LoadFixedArrayElement(deferred_promises, inner_index);
- Node* const deferred_on_resolve =
- LoadFixedArrayElement(deferred_on_resolves, inner_index);
- Node* const deferred_on_reject =
- LoadFixedArrayElement(deferred_on_rejects, inner_index);
- CallBuiltin(Builtins::kPromiseHandle, native_context, value, task,
- deferred_promise, deferred_on_resolve,
- deferred_on_reject);
- inner_index = IntPtrAdd(inner_index, IntPtrConstant(1));
- Branch(IntPtrLessThan(inner_index, inner_length), &inner_loop,
- &done);
- }
- BIND(&done);
-
- LeaveMicrotaskContext();
- SetCurrentContext(old_context);
-
- Branch(IntPtrLessThan(index, num_tasks), &loop, &init_queue_loop);
- }
+ LeaveMicrotaskContext();
+ SetCurrentContext(current_context);
+ Goto(&loop_next);
}
BIND(&is_unreachable);
Unreachable();
- }
- }
-}
-TF_BUILTIN(PromiseResolveThenableJob, InternalBuiltinsAssembler) {
- VARIABLE(exception, MachineRepresentation::kTagged, TheHoleConstant());
- Callable call = CodeFactory::Call(isolate());
- Label reject_promise(this, Label::kDeferred);
- TNode<PromiseResolveThenableJobInfo> microtask =
- TNode<PromiseResolveThenableJobInfo>::UncheckedCast(
- Parameter(Descriptor::kMicrotask));
- TNode<Context> context =
- TNode<Context>::UncheckedCast(Parameter(Descriptor::kContext));
-
- TNode<JSReceiver> thenable = TNode<JSReceiver>::UncheckedCast(LoadObjectField(
- microtask, PromiseResolveThenableJobInfo::kThenableOffset));
- TNode<JSReceiver> then = TNode<JSReceiver>::UncheckedCast(
- LoadObjectField(microtask, PromiseResolveThenableJobInfo::kThenOffset));
- TNode<JSFunction> resolve = TNode<JSFunction>::UncheckedCast(LoadObjectField(
- microtask, PromiseResolveThenableJobInfo::kResolveOffset));
- TNode<JSFunction> reject = TNode<JSFunction>::UncheckedCast(
- LoadObjectField(microtask, PromiseResolveThenableJobInfo::kRejectOffset));
-
- Node* const result = CallJS(call, context, then, thenable, resolve, reject);
- GotoIfException(result, &reject_promise, &exception);
- Return(UndefinedConstant());
+ BIND(&if_exception);
+ {
+ // Report unhandled exceptions from microtasks.
+ CallRuntime(Runtime::kReportMessage, current_context,
+ var_exception.value());
+ LeaveMicrotaskContext();
+ SetCurrentContext(current_context);
+ Goto(&loop_next);
+ }
- BIND(&reject_promise);
- CallJS(call, context, reject, UndefinedConstant(), exception.value());
- Return(UndefinedConstant());
+ BIND(&loop_next);
+ Branch(IntPtrLessThan(index.value(), num_tasks), &loop, &init_queue_loop);
+ }
+ }
}
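Editor's aside for readers of this hunk: the reject-reaction microtask step above enters the microtask context, brackets the handler call with the before/after promise hooks, reports any exception, and restores the previous context on both the normal and the exception path. A standalone C++ sketch of that shape follows; ContextScope and RunReactionJob are hypothetical names (not V8 APIs), and C++ exceptions stand in for the GotoIfException edge.

#include <cstdio>
#include <exception>
#include <functional>

// Hypothetical scope object: entering on construction, restoring the
// previous context on destruction, mirroring Enter/LeaveMicrotaskContext.
struct ContextScope {
  explicit ContextScope(const char* name) { std::printf("enter %s\n", name); }
  ~ContextScope() { std::printf("leave, restore previous context\n"); }
};

void RunReactionJob(const std::function<void()>& before_hook,
                    const std::function<void()>& handler,
                    const std::function<void()>& after_hook) {
  ContextScope scope("microtask context");
  try {
    before_hook();   // the "before" promise/debug hook, when enabled
    handler();       // the actual reject-reaction work
    after_hook();    // the "after" promise/debug hook (skipped on exception)
  } catch (const std::exception& e) {
    // Unhandled exceptions from microtasks are reported, not rethrown,
    // and the microtask loop continues with the next entry.
    std::printf("report: %s\n", e.what());
  }
}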
TF_BUILTIN(AbortJS, CodeStubAssembler) {
diff --git a/deps/v8/src/builtins/builtins-intl.h b/deps/v8/src/builtins/builtins-intl.h
index 8dda0c0898..419ff14db1 100644
--- a/deps/v8/src/builtins/builtins-intl.h
+++ b/deps/v8/src/builtins/builtins-intl.h
@@ -27,4 +27,4 @@ std::vector<NumberFormatSpan> FlattenRegionsToParts(
} // namespace internal
} // namespace v8
-#endif // V8_BUILTINS_BUILTINS_H_
+#endif // V8_BUILTINS_BUILTINS_INTL_H_
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc
index f6a6d85880..21f6039f08 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-iterator-gen.cc
@@ -11,11 +11,24 @@ namespace internal {
using compiler::Node;
+Node* IteratorBuiltinsAssembler::GetIteratorMethod(Node* context,
+ Node* object) {
+ return GetProperty(context, object, factory()->iterator_symbol());
+}
+
IteratorRecord IteratorBuiltinsAssembler::GetIterator(Node* context,
Node* object,
Label* if_exception,
Variable* exception) {
- Node* method = GetProperty(context, object, factory()->iterator_symbol());
+ Node* method = GetIteratorMethod(context, object);
+ return GetIterator(context, object, method, if_exception, exception);
+}
+
+IteratorRecord IteratorBuiltinsAssembler::GetIterator(Node* context,
+ Node* object,
+ Node* method,
+ Label* if_exception,
+ Variable* exception) {
GotoIfException(method, if_exception, exception);
Callable callable = CodeFactory::Call(isolate());
@@ -27,13 +40,7 @@ IteratorRecord IteratorBuiltinsAssembler::GetIterator(Node* context,
Branch(IsJSReceiver(iterator), &get_next, &if_notobject);
BIND(&if_notobject);
- {
- Node* ret =
- CallRuntime(Runtime::kThrowTypeError, context,
- SmiConstant(MessageTemplate::kNotAnIterator), iterator);
- GotoIfException(ret, if_exception, exception);
- Unreachable();
- }
+ { ThrowTypeError(context, MessageTemplate::kNotAnIterator, iterator); }
BIND(&get_next);
Node* const next = GetProperty(context, iterator, factory()->next_string());
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.h b/deps/v8/src/builtins/builtins-iterator-gen.h
index 42627b8437..13464516d6 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.h
+++ b/deps/v8/src/builtins/builtins-iterator-gen.h
@@ -17,11 +17,17 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler {
explicit IteratorBuiltinsAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
+ // Returns object[Symbol.iterator].
+ Node* GetIteratorMethod(Node* context, Node* object);
+
// https://tc39.github.io/ecma262/#sec-getiterator --- never used for
// @@asyncIterator.
IteratorRecord GetIterator(Node* context, Node* object,
Label* if_exception = nullptr,
Variable* exception = nullptr);
+ IteratorRecord GetIterator(Node* context, Node* object, Node* method,
+ Label* if_exception = nullptr,
+ Variable* exception = nullptr);
// https://tc39.github.io/ecma262/#sec-iteratorstep
// Returns `false` if the iterator is done, otherwise returns an
diff --git a/deps/v8/src/builtins/builtins-math-gen.cc b/deps/v8/src/builtins/builtins-math-gen.cc
index d588113cdd..be58e8210e 100644
--- a/deps/v8/src/builtins/builtins-math-gen.cc
+++ b/deps/v8/src/builtins/builtins-math-gen.cc
@@ -268,7 +268,7 @@ TF_BUILTIN(MathClz32, CodeStubAssembler) {
BIND(&if_xissmi);
{
- var_clz32_x.Bind(SmiToWord32(x));
+ var_clz32_x.Bind(SmiToInt32(x));
Goto(&do_clz32);
}
diff --git a/deps/v8/src/builtins/builtins-number-gen.cc b/deps/v8/src/builtins/builtins-number-gen.cc
index 821dac9cc0..1340c33eb1 100644
--- a/deps/v8/src/builtins/builtins-number-gen.cc
+++ b/deps/v8/src/builtins/builtins-number-gen.cc
@@ -319,12 +319,14 @@ TF_BUILTIN(NumberParseInt, CodeStubAssembler) {
GotoIf(Float64Equal(input_value, ChangeInt32ToFloat64(input_value32)),
&if_inputissigned32);
- // Check if the absolute {input} value is in the ]0.01,1e9[ range.
+ // Check if the absolute {input} value is in the [1,1<<31[ range.
+ // Take the generic path for the range [0,1[ because the result
+ // could be -0.
Node* input_value_abs = Float64Abs(input_value);
- GotoIfNot(Float64LessThan(input_value_abs, Float64Constant(1e9)),
+ GotoIfNot(Float64LessThan(input_value_abs, Float64Constant(1u << 31)),
&if_generic);
- Branch(Float64LessThan(Float64Constant(0.01), input_value_abs),
+ Branch(Float64LessThanOrEqual(Float64Constant(1), input_value_abs),
&if_inputissigned32, &if_generic);
// Return the truncated int32 value, and return the tagged result.
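The narrowed range in the comment above exists because magnitudes below 1 can produce a negative zero, which a truncated int32 result cannot carry. A minimal standalone C++ illustration of that sign loss (not V8 code):

#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  double input = -0.5;
  double truncated = std::trunc(input);  // -0.0: the sign survives as a double
  int32_t as_int32 = static_cast<int32_t>(truncated);  // 0: the sign is lost
  std::printf("signbit(trunc(-0.5)) = %d, int32 = %d\n",
              std::signbit(truncated) ? 1 : 0, static_cast<int>(as_int32));
  return 0;
}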
@@ -904,8 +906,8 @@ TF_BUILTIN(Divide, NumberBuiltinsAssembler) {
}
BIND(&dividend_is_not_zero);
- Node* untagged_divisor = SmiToWord32(divisor);
- Node* untagged_dividend = SmiToWord32(dividend);
+ Node* untagged_divisor = SmiToInt32(divisor);
+ Node* untagged_dividend = SmiToInt32(dividend);
// Do floating point division if {dividend} is kMinInt (or kMinInt - 1
// if the Smi size is 31) and {divisor} is -1.
@@ -929,7 +931,7 @@ TF_BUILTIN(Divide, NumberBuiltinsAssembler) {
Node* truncated = Int32Mul(untagged_result, untagged_divisor);
// Do floating point division if the remainder is not 0.
GotoIf(Word32NotEqual(untagged_dividend, truncated), &bailout);
- Return(SmiFromWord32(untagged_result));
+ Return(SmiFromInt32(untagged_result));
// Bailout: convert {dividend} and {divisor} to double and do double
// division.
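For readers following the Divide fast path, the bailout conditions can be summarized in a small standalone C++ model. TrySmiDivide is a hypothetical name, not a V8 function; it bails out whenever the quotient is not exactly representable as an int32, which is when the builtin falls back to double division.

#include <cstdint>
#include <optional>

// Returns the quotient only when it is exactly representable as an int32;
// otherwise the caller falls back to floating point division.
std::optional<int32_t> TrySmiDivide(int32_t dividend, int32_t divisor) {
  if (divisor == 0) return std::nullopt;                   // result would be +/-Infinity or NaN
  if (dividend == 0 && divisor < 0) return std::nullopt;   // result would be -0
  if (dividend == INT32_MIN && divisor == -1) return std::nullopt;  // int32 overflow
  if (dividend % divisor != 0) return std::nullopt;        // non-integral quotient
  return dividend / divisor;
}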
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index 4cd012e6f0..1ebfbacf38 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -16,6 +16,8 @@ namespace internal {
// ES6 section 19.1 Object Objects
typedef compiler::Node Node;
+template <class T>
+using TNode = CodeStubAssembler::TNode<T>;
class ObjectBuiltinsAssembler : public CodeStubAssembler {
public:
@@ -34,6 +36,46 @@ class ObjectBuiltinsAssembler : public CodeStubAssembler {
Node* ConstructDataDescriptor(Node* context, Node* value, Node* writable,
Node* enumerable, Node* configurable);
Node* GetAccessorOrUndefined(Node* accessor, Label* if_bailout);
+
+ Node* IsSpecialReceiverMap(SloppyTNode<Map> map);
+};
+
+class ObjectEntriesValuesBuiltinsAssembler : public ObjectBuiltinsAssembler {
+ public:
+ explicit ObjectEntriesValuesBuiltinsAssembler(
+ compiler::CodeAssemblerState* state)
+ : ObjectBuiltinsAssembler(state) {}
+
+ protected:
+ enum CollectType { kEntries, kValues };
+
+ TNode<Word32T> IsStringWrapperElementsKind(TNode<Map> map);
+
+ TNode<BoolT> IsPropertyEnumerable(TNode<Uint32T> details);
+
+ TNode<BoolT> IsPropertyKindAccessor(TNode<Uint32T> kind);
+
+ TNode<BoolT> IsPropertyKindData(TNode<Uint32T> kind);
+
+ TNode<Uint32T> HasHiddenPrototype(TNode<Map> map);
+
+ TNode<Uint32T> LoadPropertyKind(TNode<Uint32T> details) {
+ return DecodeWord32<PropertyDetails::KindField>(details);
+ }
+
+ void GetOwnValuesOrEntries(TNode<Context> context, TNode<Object> maybe_object,
+ CollectType collect_type);
+
+ void GotoIfMapHasSlowProperties(TNode<Map> map, Label* if_slow);
+
+ TNode<JSArray> FastGetOwnValuesOrEntries(
+ TNode<Context> context, TNode<JSObject> object,
+ Label* if_call_runtime_with_fast_path, Label* if_no_properties,
+ CollectType collect_type);
+
+ TNode<JSArray> FinalizeValuesOrEntriesJSArray(
+ TNode<Context> context, TNode<FixedArray> values_or_entries,
+ TNode<IntPtrT> size, TNode<Map> array_map, Label* if_empty);
};
void ObjectBuiltinsAssembler::ReturnToStringFormat(Node* context,
@@ -97,6 +139,253 @@ Node* ObjectBuiltinsAssembler::ConstructDataDescriptor(Node* context,
return js_desc;
}
+Node* ObjectBuiltinsAssembler::IsSpecialReceiverMap(SloppyTNode<Map> map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
+ Node* is_special = IsSpecialReceiverInstanceType(LoadMapInstanceType(map));
+ uint32_t mask =
+ Map::HasNamedInterceptorBit::kMask | Map::IsAccessCheckNeededBit::kMask;
+ USE(mask);
+ // Interceptors or access checks imply special receiver.
+ CSA_ASSERT(this,
+ SelectConstant(IsSetWord32(LoadMapBitField(map), mask), is_special,
+ Int32Constant(1), MachineRepresentation::kWord32));
+ return is_special;
+}
+
+TNode<Word32T>
+ObjectEntriesValuesBuiltinsAssembler::IsStringWrapperElementsKind(
+ TNode<Map> map) {
+ Node* kind = LoadMapElementsKind(map);
+ return Word32Or(
+ Word32Equal(kind, Int32Constant(FAST_STRING_WRAPPER_ELEMENTS)),
+ Word32Equal(kind, Int32Constant(SLOW_STRING_WRAPPER_ELEMENTS)));
+}
+
+TNode<BoolT> ObjectEntriesValuesBuiltinsAssembler::IsPropertyEnumerable(
+ TNode<Uint32T> details) {
+ TNode<Uint32T> attributes =
+ DecodeWord32<PropertyDetails::AttributesField>(details);
+ return IsNotSetWord32(attributes, PropertyAttributes::DONT_ENUM);
+}
+
+TNode<BoolT> ObjectEntriesValuesBuiltinsAssembler::IsPropertyKindAccessor(
+ TNode<Uint32T> kind) {
+ return Word32Equal(kind, Int32Constant(PropertyKind::kAccessor));
+}
+
+TNode<BoolT> ObjectEntriesValuesBuiltinsAssembler::IsPropertyKindData(
+ TNode<Uint32T> kind) {
+ return Word32Equal(kind, Int32Constant(PropertyKind::kData));
+}
+
+TNode<Uint32T> ObjectEntriesValuesBuiltinsAssembler::HasHiddenPrototype(
+ TNode<Map> map) {
+ TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
+ return DecodeWord32<Map::HasHiddenPrototypeBit>(bit_field3);
+}
+
+void ObjectEntriesValuesBuiltinsAssembler::GetOwnValuesOrEntries(
+ TNode<Context> context, TNode<Object> maybe_object,
+ CollectType collect_type) {
+ TNode<JSReceiver> receiver = ToObject(context, maybe_object);
+
+ Label if_call_runtime_with_fast_path(this, Label::kDeferred),
+ if_call_runtime(this, Label::kDeferred),
+ if_no_properties(this, Label::kDeferred);
+
+ TNode<Map> map = LoadMap(receiver);
+ GotoIfNot(IsJSObjectMap(map), &if_call_runtime);
+ GotoIfMapHasSlowProperties(map, &if_call_runtime);
+
+ TNode<JSObject> object = CAST(receiver);
+ TNode<FixedArrayBase> elements = LoadElements(object);
+ // If the object has elements, we treat it as the slow case
+ // and go to the runtime call.
+ GotoIfNot(IsEmptyFixedArray(elements), &if_call_runtime_with_fast_path);
+
+ TNode<JSArray> result = FastGetOwnValuesOrEntries(
+ context, object, &if_call_runtime_with_fast_path, &if_no_properties,
+ collect_type);
+ Return(result);
+
+ BIND(&if_no_properties);
+ {
+ Node* native_context = LoadNativeContext(context);
+ Node* array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
+ Node* empty_array = AllocateJSArray(PACKED_ELEMENTS, array_map,
+ IntPtrConstant(0), SmiConstant(0));
+ Return(empty_array);
+ }
+
+ BIND(&if_call_runtime_with_fast_path);
+ {
+ // In the slow case, we simply call the runtime.
+ if (collect_type == CollectType::kEntries) {
+ Return(CallRuntime(Runtime::kObjectEntries, context, object));
+ } else {
+ DCHECK(collect_type == CollectType::kValues);
+ Return(CallRuntime(Runtime::kObjectValues, context, object));
+ }
+ }
+
+ BIND(&if_call_runtime);
+ {
+ // In the slow case, we simply call the runtime.
+ if (collect_type == CollectType::kEntries) {
+ Return(
+ CallRuntime(Runtime::kObjectEntriesSkipFastPath, context, receiver));
+ } else {
+ DCHECK(collect_type == CollectType::kValues);
+ Return(
+ CallRuntime(Runtime::kObjectValuesSkipFastPath, context, receiver));
+ }
+ }
+}
+
+void ObjectEntriesValuesBuiltinsAssembler::GotoIfMapHasSlowProperties(
+ TNode<Map> map, Label* if_slow) {
+ GotoIf(IsStringWrapperElementsKind(map), if_slow);
+ GotoIf(IsSpecialReceiverMap(map), if_slow);
+ GotoIf(HasHiddenPrototype(map), if_slow);
+ GotoIf(IsDictionaryMap(map), if_slow);
+}
+
+TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
+ TNode<Context> context, TNode<JSObject> object,
+ Label* if_call_runtime_with_fast_path, Label* if_no_properties,
+ CollectType collect_type) {
+ Node* native_context = LoadNativeContext(context);
+ TNode<Map> array_map =
+ LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
+ TNode<Map> map = LoadMap(object);
+ TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
+
+ Label if_has_enum_cache(this), if_not_has_enum_cache(this),
+ collect_entries(this);
+ Node* object_enum_length =
+ DecodeWordFromWord32<Map::EnumLengthBits>(bit_field3);
+ Node* has_enum_cache = WordNotEqual(
+ object_enum_length, IntPtrConstant(kInvalidEnumCacheSentinel));
+
+ // If the object has an enum cache, we use its length as the result
+ // array length, because it matches the length of the
+ // Object.entries/Object.values result array; allocating
+ // object_enum_length entries therefore uses less memory than the
+ // NumberOfOwnDescriptorsBits value would.
+ // If the enum cache has not been initialized yet, we call into the
+ // runtime, which initializes it for subsequent calls of the CSA fast
+ // path.
+ Branch(has_enum_cache, &if_has_enum_cache, if_call_runtime_with_fast_path);
+
+ BIND(&if_has_enum_cache);
+ {
+ GotoIf(WordEqual(object_enum_length, IntPtrConstant(0)), if_no_properties);
+ TNode<FixedArray> values_or_entries = TNode<FixedArray>::UncheckedCast(
+ AllocateFixedArray(PACKED_ELEMENTS, object_enum_length,
+ INTPTR_PARAMETERS, kAllowLargeObjectAllocation));
+
+ // Even when an enum cache is present, we cannot detect accessor
+ // properties until we loop through the descriptors. If the object
+ // turns out to have an accessor we must jump to the runtime call,
+ // which would otherwise leave uninitialized slots behind in the
+ // FixedArray. So the array is filled with the-hole up front, even if
+ // the enum cache exists.
+ FillFixedArrayWithValue(PACKED_ELEMENTS, values_or_entries,
+ IntPtrConstant(0), object_enum_length,
+ Heap::kTheHoleValueRootIndex);
+
+ TVARIABLE(IntPtrT, var_result_index, IntPtrConstant(0));
+ TVARIABLE(IntPtrT, var_descriptor_number, IntPtrConstant(0));
+ Variable* vars[] = {&var_descriptor_number, &var_result_index};
+ // Let desc be ? O.[[GetOwnProperty]](key).
+ TNode<DescriptorArray> descriptors = LoadMapDescriptors(map);
+ Label loop(this, 2, vars), after_loop(this), loop_condition(this);
+ Branch(IntPtrEqual(var_descriptor_number.value(), object_enum_length),
+ &after_loop, &loop);
+
+ // We don't use BuildFastLoop. Instead, we use a hand-written loop,
+ // because we need 'continue' functionality.
+ BIND(&loop);
+ {
+ // Currently, we do not invoke getters, so the map will not change.
+ CSA_ASSERT(this, WordEqual(map, LoadMap(object)));
+ TNode<Uint32T> descriptor_index = TNode<Uint32T>::UncheckedCast(
+ TruncateIntPtrToInt32(var_descriptor_number.value()));
+ Node* next_key = DescriptorArrayGetKey(descriptors, descriptor_index);
+
+ // Skip Symbols.
+ GotoIf(IsSymbol(next_key), &loop_condition);
+
+ TNode<Uint32T> details = TNode<Uint32T>::UncheckedCast(
+ DescriptorArrayGetDetails(descriptors, descriptor_index));
+ TNode<Uint32T> kind = LoadPropertyKind(details);
+
+ // If the property is an accessor, we leave the fast path and call the runtime.
+ GotoIf(IsPropertyKindAccessor(kind), if_call_runtime_with_fast_path);
+ CSA_ASSERT(this, IsPropertyKindData(kind));
+
+ // If desc is not undefined and desc.[[Enumerable]] is true, then
+ GotoIfNot(IsPropertyEnumerable(details), &loop_condition);
+
+ VARIABLE(var_property_value, MachineRepresentation::kTagged,
+ UndefinedConstant());
+ Node* descriptor_name_index = DescriptorArrayToKeyIndex(
+ TruncateIntPtrToInt32(var_descriptor_number.value()));
+
+ // Let value be ? Get(O, key).
+ LoadPropertyFromFastObject(object, map, descriptors,
+ descriptor_name_index, details,
+ &var_property_value);
+
+ // If kind is "value", append value to properties.
+ Node* value = var_property_value.value();
+
+ if (collect_type == CollectType::kEntries) {
+ // Let entry be CreateArrayFromList(« key, value »).
+ Node* array = nullptr;
+ Node* elements = nullptr;
+ std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
+ PACKED_ELEMENTS, array_map, SmiConstant(2), nullptr,
+ IntPtrConstant(2));
+ StoreFixedArrayElement(elements, 0, next_key, SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(elements, 1, value, SKIP_WRITE_BARRIER);
+ value = array;
+ }
+
+ StoreFixedArrayElement(values_or_entries, var_result_index.value(),
+ value);
+ Increment(&var_result_index, 1);
+ Goto(&loop_condition);
+
+ BIND(&loop_condition);
+ {
+ Increment(&var_descriptor_number, 1);
+ Branch(IntPtrEqual(var_descriptor_number.value(), object_enum_length),
+ &after_loop, &loop);
+ }
+ }
+ BIND(&after_loop);
+ return FinalizeValuesOrEntriesJSArray(context, values_or_entries,
+ var_result_index.value(), array_map,
+ if_no_properties);
+ }
+}
+
+TNode<JSArray>
+ObjectEntriesValuesBuiltinsAssembler::FinalizeValuesOrEntriesJSArray(
+ TNode<Context> context, TNode<FixedArray> result, TNode<IntPtrT> size,
+ TNode<Map> array_map, Label* if_empty) {
+ CSA_ASSERT(this, IsJSArrayMap(array_map));
+
+ GotoIf(IntPtrEqual(size, IntPtrConstant(0)), if_empty);
+ Node* array = AllocateUninitializedJSArrayWithoutElements(
+ array_map, SmiTag(size), nullptr);
+ StoreObjectField(array, JSArray::kElementsOffset, result);
+ return TNode<JSArray>::UncheckedCast(array);
+}
+
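Stepping back from the CSA details, the fast path added here is a single pass over the descriptor array. A hypothetical standalone C++ model of that pass follows; OwnProperty and FastCollectEntries are illustrative names only, not V8 types.

#include <string>
#include <utility>
#include <vector>

// Hypothetical stand-in for one fast-mode descriptor entry.
struct OwnProperty {
  bool is_symbol;      // symbol-named keys are skipped
  bool is_accessor;    // an accessor forces the runtime path
  bool is_enumerable;  // DONT_ENUM properties are skipped
  std::string key;
  std::string value;
};

// Mirrors the loop structure above: skip symbols and non-enumerable
// properties, bail out completely on accessors, otherwise collect
// [key, value] pairs (Object.values would keep only the value).
bool FastCollectEntries(const std::vector<OwnProperty>& descriptors,
                        std::vector<std::pair<std::string, std::string>>* out) {
  for (const OwnProperty& p : descriptors) {
    if (p.is_symbol) continue;
    if (p.is_accessor) return false;  // caller falls back to the runtime
    if (!p.is_enumerable) continue;
    out->emplace_back(p.key, p.value);
  }
  return true;
}

Accessors cannot be handled in this pass because, as the comments above note, the fast path never invokes getters, so the map stays unchanged throughout the loop.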
TF_BUILTIN(ObjectPrototypeToLocaleString, CodeStubAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
@@ -105,7 +394,7 @@ TF_BUILTIN(ObjectPrototypeToLocaleString, CodeStubAssembler) {
GotoIf(IsNullOrUndefined(receiver), &if_null_or_undefined);
TNode<Object> method =
- CAST(GetProperty(context, receiver, factory()->toString_string()));
+ GetProperty(context, receiver, factory()->toString_string());
Return(CallJS(CodeFactory::Call(isolate()), context, method, receiver));
BIND(&if_null_or_undefined);
@@ -266,6 +555,22 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
}
}
+TF_BUILTIN(ObjectValues, ObjectEntriesValuesBuiltinsAssembler) {
+ TNode<JSObject> object =
+ TNode<JSObject>::UncheckedCast(Parameter(Descriptor::kObject));
+ TNode<Context> context =
+ TNode<Context>::UncheckedCast(Parameter(Descriptor::kContext));
+ GetOwnValuesOrEntries(context, object, CollectType::kValues);
+}
+
+TF_BUILTIN(ObjectEntries, ObjectEntriesValuesBuiltinsAssembler) {
+ TNode<JSObject> object =
+ TNode<JSObject>::UncheckedCast(Parameter(Descriptor::kObject));
+ TNode<Context> context =
+ TNode<Context>::UncheckedCast(Parameter(Descriptor::kContext));
+ GetOwnValuesOrEntries(context, object, CollectType::kEntries);
+}
+
// ES #sec-object.prototype.isprototypeof
TF_BUILTIN(ObjectPrototypeIsPrototypeOf, ObjectBuiltinsAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -304,7 +609,7 @@ TF_BUILTIN(ObjectPrototypeIsPrototypeOf, ObjectBuiltinsAssembler) {
GotoIfNot(IsJSReceiver(value), &if_valueisnotreceiver);
// Simulate the ToObject invocation on {receiver}.
- CallBuiltin(Builtins::kToObject, context, receiver);
+ ToObject(context, receiver);
Unreachable();
}
@@ -367,9 +672,7 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
Branch(IsString(var_tag.value()), &if_tagisstring, &if_tagisnotstring);
BIND(&if_tagisnotstring);
{
- var_tag.Bind(
- CallStub(Builtins::CallableFor(isolate(), Builtins::kClassOf),
- context, receiver));
+ var_tag.Bind(CallRuntime(Runtime::kClassOf, context, receiver));
Goto(&if_tagisstring);
}
BIND(&if_tagisstring);
@@ -574,9 +877,8 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
BIND(&return_generic);
{
- Node* tag = GetProperty(
- context, CallBuiltin(Builtins::kToObject, context, receiver),
- LoadRoot(Heap::kto_string_tag_symbolRootIndex));
+ Node* tag = GetProperty(context, ToObject(context, receiver),
+ LoadRoot(Heap::kto_string_tag_symbolRootIndex));
GotoIf(TaggedIsSmi(tag), &return_default);
GotoIfNot(IsString(tag), &return_default);
ReturnToStringFormat(context, tag);
@@ -592,7 +894,7 @@ TF_BUILTIN(ObjectPrototypeValueOf, CodeStubAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* context = Parameter(Descriptor::kContext);
- Return(CallBuiltin(Builtins::kToObject, context, receiver));
+ Return(ToObject(context, receiver));
}
// ES #sec-object.create
@@ -760,7 +1062,7 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
// Get the initial map from the function, jumping to the runtime if we don't
// have one.
- Label runtime(this);
+ Label done(this), runtime(this);
GotoIfNot(IsFunctionWithPrototypeSlotMap(LoadMap(closure)), &runtime);
Node* maybe_map =
LoadObjectField(closure, JSFunction::kPrototypeOrInitialMapOffset);
@@ -790,7 +1092,13 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
Node* executing = SmiConstant(JSGeneratorObject::kGeneratorExecuting);
StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kContinuationOffset,
executing);
- Return(result);
+ GotoIfNot(HasInstanceType(maybe_map, JS_ASYNC_GENERATOR_OBJECT_TYPE), &done);
+ StoreObjectFieldNoWriteBarrier(
+ result, JSAsyncGeneratorObject::kIsAwaitingOffset, SmiConstant(0));
+ Goto(&done);
+
+ BIND(&done);
+ { Return(result); }
BIND(&runtime);
{
@@ -810,7 +1118,7 @@ TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) {
Node* key = args.GetOptionalArgumentValue(1);
// 1. Let obj be ? ToObject(O).
- object = CallBuiltin(Builtins::kToObject, context, object);
+ object = ToObject(context, object);
// 2. Let key be ? ToPropertyKey(P).
key = ToName(context, key);
diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc
index 36f7ebfc0a..4e353b9260 100644
--- a/deps/v8/src/builtins/builtins-object.cc
+++ b/deps/v8/src/builtins/builtins-object.cc
@@ -395,31 +395,6 @@ BUILTIN(ObjectIsSealed) {
return isolate->heap()->ToBoolean(result.FromJust());
}
-BUILTIN(ObjectValues) {
- HandleScope scope(isolate);
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- Handle<JSReceiver> receiver;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
- Object::ToObject(isolate, object));
- Handle<FixedArray> values;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, values, JSReceiver::GetOwnValues(receiver, ENUMERABLE_STRINGS));
- return *isolate->factory()->NewJSArrayWithElements(values);
-}
-
-BUILTIN(ObjectEntries) {
- HandleScope scope(isolate);
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- Handle<JSReceiver> receiver;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
- Object::ToObject(isolate, object));
- Handle<FixedArray> entries;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, entries,
- JSReceiver::GetOwnEntries(receiver, ENUMERABLE_STRINGS));
- return *isolate->factory()->NewJSArrayWithElements(entries);
-}
-
BUILTIN(ObjectGetOwnPropertyDescriptors) {
HandleScope scope(isolate);
Handle<Object> object = args.atOrUndefined(isolate, 1);
diff --git a/deps/v8/src/builtins/builtins-promise-gen.cc b/deps/v8/src/builtins/builtins-promise-gen.cc
index 1a3ebcd892..d3ea3f82e2 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.cc
+++ b/deps/v8/src/builtins/builtins-promise-gen.cc
@@ -22,19 +22,26 @@ Node* PromiseBuiltinsAssembler::AllocateJSPromise(Node* context) {
Node* const promise_fun =
LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
CSA_ASSERT(this, IsFunctionWithPrototypeSlotMap(LoadMap(promise_fun)));
- Node* const initial_map =
+ Node* const promise_map =
LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset);
- Node* const instance = AllocateJSObjectFromMap(initial_map);
- return instance;
+ Node* const promise = Allocate(JSPromise::kSizeWithEmbedderFields);
+ StoreMapNoWriteBarrier(promise, promise_map);
+ StoreObjectFieldRoot(promise, JSPromise::kPropertiesOrHashOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldRoot(promise, JSPromise::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ return promise;
}
void PromiseBuiltinsAssembler::PromiseInit(Node* promise) {
STATIC_ASSERT(v8::Promise::kPending == 0);
+ StoreObjectFieldNoWriteBarrier(promise, JSPromise::kReactionsOrResultOffset,
+ SmiConstant(Smi::kZero));
StoreObjectFieldNoWriteBarrier(promise, JSPromise::kFlagsOffset,
- SmiConstant(0));
+ SmiConstant(Smi::kZero));
for (int i = 0; i < v8::Promise::kEmbedderFieldCount; i++) {
int offset = JSPromise::kSize + i * kPointerSize;
- StoreObjectFieldNoWriteBarrier(promise, offset, SmiConstant(0));
+ StoreObjectFieldNoWriteBarrier(promise, offset, SmiConstant(Smi::kZero));
}
}
@@ -58,9 +65,11 @@ Node* PromiseBuiltinsAssembler::AllocateAndInitJSPromise(Node* context,
Node* PromiseBuiltinsAssembler::AllocateAndSetJSPromise(
Node* context, v8::Promise::PromiseState status, Node* result) {
- Node* const instance = AllocateJSPromise(context);
+ DCHECK_NE(Promise::kPending, status);
- StoreObjectFieldNoWriteBarrier(instance, JSPromise::kResultOffset, result);
+ Node* const instance = AllocateJSPromise(context);
+ StoreObjectFieldNoWriteBarrier(instance, JSPromise::kReactionsOrResultOffset,
+ result);
STATIC_ASSERT(JSPromise::kStatusShift == 0);
StoreObjectFieldNoWriteBarrier(instance, JSPromise::kFlagsOffset,
SmiConstant(status));
@@ -86,66 +95,68 @@ PromiseBuiltinsAssembler::CreatePromiseResolvingFunctions(
promise, debug_event, native_context);
Node* const map = LoadContextElement(
native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
- Node* const resolve_info =
- LoadContextElement(native_context, Context::PROMISE_RESOLVE_SHARED_FUN);
+ Node* const resolve_info = LoadContextElement(
+ native_context,
+ Context::PROMISE_CAPABILITY_DEFAULT_RESOLVE_SHARED_FUN_INDEX);
Node* const resolve =
AllocateFunctionWithMapAndContext(map, resolve_info, promise_context);
- Node* const reject_info =
- LoadContextElement(native_context, Context::PROMISE_REJECT_SHARED_FUN);
+ Node* const reject_info = LoadContextElement(
+ native_context,
+ Context::PROMISE_CAPABILITY_DEFAULT_REJECT_SHARED_FUN_INDEX);
Node* const reject =
AllocateFunctionWithMapAndContext(map, reject_info, promise_context);
return std::make_pair(resolve, reject);
}
-Node* PromiseBuiltinsAssembler::NewPromiseCapability(Node* context,
- Node* constructor,
- Node* debug_event) {
- if (debug_event == nullptr) {
- debug_event = TrueConstant();
- }
+// ES #sec-newpromisecapability
+TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) {
+ Node* const context = Parameter(Descriptor::kContext);
+ Node* const constructor = Parameter(Descriptor::kConstructor);
+ Node* const debug_event = Parameter(Descriptor::kDebugEvent);
+ Node* const native_context = LoadNativeContext(context);
- Label if_not_constructor(this, Label::kDeferred);
+ Label if_not_constructor(this, Label::kDeferred),
+ if_notcallable(this, Label::kDeferred), if_fast_promise_capability(this),
+ if_slow_promise_capability(this, Label::kDeferred);
GotoIf(TaggedIsSmi(constructor), &if_not_constructor);
GotoIfNot(IsConstructorMap(LoadMap(constructor)), &if_not_constructor);
-
- Node* native_context = LoadNativeContext(context);
-
- Node* map = LoadRoot(Heap::kTuple3MapRootIndex);
- Node* capability = AllocateStruct(map);
-
- VARIABLE(var_result, MachineRepresentation::kTagged);
- var_result.Bind(capability);
-
- Label if_builtin_promise(this), if_custom_promise(this, Label::kDeferred),
- out(this);
Branch(WordEqual(constructor,
LoadContextElement(native_context,
Context::PROMISE_FUNCTION_INDEX)),
- &if_builtin_promise, &if_custom_promise);
+ &if_fast_promise_capability, &if_slow_promise_capability);
- BIND(&if_builtin_promise);
+ BIND(&if_fast_promise_capability);
{
- Node* promise = AllocateJSPromise(context);
- PromiseInit(promise);
- StoreObjectField(capability, PromiseCapability::kPromiseOffset, promise);
+ Node* promise =
+ AllocateAndInitJSPromise(native_context, UndefinedConstant());
Node* resolve = nullptr;
Node* reject = nullptr;
-
std::tie(resolve, reject) =
CreatePromiseResolvingFunctions(promise, debug_event, native_context);
- StoreObjectField(capability, PromiseCapability::kResolveOffset, resolve);
- StoreObjectField(capability, PromiseCapability::kRejectOffset, reject);
- GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &out);
- CallRuntime(Runtime::kPromiseHookInit, context, promise,
- UndefinedConstant());
- Goto(&out);
+ Node* capability = Allocate(PromiseCapability::kSize);
+ StoreMapNoWriteBarrier(capability, Heap::kPromiseCapabilityMapRootIndex);
+ StoreObjectFieldNoWriteBarrier(capability,
+ PromiseCapability::kPromiseOffset, promise);
+ StoreObjectFieldNoWriteBarrier(capability,
+ PromiseCapability::kResolveOffset, resolve);
+ StoreObjectFieldNoWriteBarrier(capability, PromiseCapability::kRejectOffset,
+ reject);
+ Return(capability);
}
- BIND(&if_custom_promise);
+ BIND(&if_slow_promise_capability);
{
- Label if_notcallable(this, Label::kDeferred);
+ Node* capability = Allocate(PromiseCapability::kSize);
+ StoreMapNoWriteBarrier(capability, Heap::kPromiseCapabilityMapRootIndex);
+ StoreObjectFieldRoot(capability, PromiseCapability::kPromiseOffset,
+ Heap::kUndefinedValueRootIndex);
+ StoreObjectFieldRoot(capability, PromiseCapability::kResolveOffset,
+ Heap::kUndefinedValueRootIndex);
+ StoreObjectFieldRoot(capability, PromiseCapability::kRejectOffset,
+ Heap::kUndefinedValueRootIndex);
+
Node* executor_context =
CreatePromiseGetCapabilitiesExecutorContext(capability, native_context);
Node* executor_info = LoadContextElement(
@@ -155,8 +166,9 @@ Node* PromiseBuiltinsAssembler::NewPromiseCapability(Node* context,
Node* executor = AllocateFunctionWithMapAndContext(
function_map, executor_info, executor_context);
- Node* promise = ConstructJS(CodeFactory::Construct(isolate()), context,
- constructor, executor);
+ Node* promise = ConstructJS(CodeFactory::Construct(isolate()),
+ native_context, constructor, executor);
+ StoreObjectField(capability, PromiseCapability::kPromiseOffset, promise);
Node* resolve =
LoadObjectField(capability, PromiseCapability::kResolveOffset);
@@ -167,26 +179,14 @@ Node* PromiseBuiltinsAssembler::NewPromiseCapability(Node* context,
LoadObjectField(capability, PromiseCapability::kRejectOffset);
GotoIf(TaggedIsSmi(reject), &if_notcallable);
GotoIfNot(IsCallable(reject), &if_notcallable);
-
- StoreObjectField(capability, PromiseCapability::kPromiseOffset, promise);
-
- Goto(&out);
-
- BIND(&if_notcallable);
- StoreObjectField(capability, PromiseCapability::kPromiseOffset,
- UndefinedConstant());
- StoreObjectField(capability, PromiseCapability::kResolveOffset,
- UndefinedConstant());
- StoreObjectField(capability, PromiseCapability::kRejectOffset,
- UndefinedConstant());
- ThrowTypeError(context, MessageTemplate::kPromiseNonCallable);
+ Return(capability);
}
BIND(&if_not_constructor);
ThrowTypeError(context, MessageTemplate::kNotConstructor, constructor);
- BIND(&out);
- return var_result.value();
+ BIND(&if_notcallable);
+ ThrowTypeError(context, MessageTemplate::kPromiseNonCallable);
}
Node* PromiseBuiltinsAssembler::CreatePromiseContext(Node* native_context,
@@ -236,7 +236,7 @@ Node* PromiseBuiltinsAssembler::IsPromiseStatus(
Node* PromiseBuiltinsAssembler::PromiseStatus(Node* promise) {
STATIC_ASSERT(JSPromise::kStatusShift == 0);
Node* const flags = LoadObjectField(promise, JSPromise::kFlagsOffset);
- return Word32And(SmiToWord32(flags), Int32Constant(JSPromise::kStatusMask));
+ return Word32And(SmiToInt32(flags), Int32Constant(JSPromise::kStatusMask));
}
void PromiseBuiltinsAssembler::PromiseSetStatus(
@@ -258,579 +258,299 @@ void PromiseBuiltinsAssembler::PromiseSetHandledHint(Node* promise) {
StoreObjectFieldNoWriteBarrier(promise, JSPromise::kFlagsOffset, new_flags);
}
-Node* PromiseBuiltinsAssembler::SpeciesConstructor(Node* context, Node* object,
- Node* default_constructor) {
- Isolate* isolate = this->isolate();
- VARIABLE(var_result, MachineRepresentation::kTagged);
- var_result.Bind(default_constructor);
-
- // 2. Let C be ? Get(O, "constructor").
- Node* const constructor =
- GetProperty(context, object, isolate->factory()->constructor_string());
-
- // 3. If C is undefined, return defaultConstructor.
- Label out(this);
- GotoIf(IsUndefined(constructor), &out);
-
- // 4. If Type(C) is not Object, throw a TypeError exception.
- ThrowIfNotJSReceiver(context, constructor,
- MessageTemplate::kConstructorNotReceiver);
-
- // 5. Let S be ? Get(C, @@species).
- Node* const species =
- GetProperty(context, constructor, isolate->factory()->species_symbol());
-
- // 6. If S is either undefined or null, return defaultConstructor.
- GotoIf(IsNullOrUndefined(species), &out);
-
- // 7. If IsConstructor(S) is true, return S.
- Label throw_error(this);
- GotoIf(TaggedIsSmi(species), &throw_error);
- GotoIfNot(IsConstructorMap(LoadMap(species)), &throw_error);
- var_result.Bind(species);
- Goto(&out);
-
- // 8. Throw a TypeError exception.
- BIND(&throw_error);
- ThrowTypeError(context, MessageTemplate::kSpeciesNotConstructor);
-
- BIND(&out);
- return var_result.value();
-}
-
-void PromiseBuiltinsAssembler::AppendPromiseCallback(int offset, Node* promise,
- Node* value) {
- Node* elements = LoadObjectField(promise, offset);
- Node* length = LoadFixedArrayBaseLength(elements);
- CodeStubAssembler::ParameterMode mode = OptimalParameterMode();
- length = TaggedToParameter(length, mode);
-
- Node* delta = IntPtrOrSmiConstant(1, mode);
- Node* new_capacity = IntPtrOrSmiAdd(length, delta, mode);
-
- const WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER;
- int additional_offset = 0;
-
- ExtractFixedArrayFlags flags;
- flags |= ExtractFixedArrayFlag::kFixedArrays;
- Node* new_elements =
- ExtractFixedArray(elements, nullptr, length, new_capacity, flags, mode);
-
- StoreFixedArrayElement(new_elements, length, value, barrier_mode,
- additional_offset, mode);
-
- StoreObjectField(promise, offset, new_elements);
-}
-
-Node* PromiseBuiltinsAssembler::InternalPromiseThen(Node* context,
- Node* promise,
- Node* on_resolve,
- Node* on_reject) {
- Isolate* isolate = this->isolate();
-
- // 2. If IsPromise(promise) is false, throw a TypeError exception.
- ThrowIfNotInstanceType(context, promise, JS_PROMISE_TYPE,
- "Promise.prototype.then");
-
- Node* const native_context = LoadNativeContext(context);
- Node* const promise_fun =
- LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
-
- // 3. Let C be ? SpeciesConstructor(promise, %Promise%).
- Node* constructor = SpeciesConstructor(context, promise, promise_fun);
-
- // 4. Let resultCapability be ? NewPromiseCapability(C).
- Callable call_callable = CodeFactory::Call(isolate);
- Label fast_promise_capability(this), promise_capability(this),
- perform_promise_then(this);
- VARIABLE(var_deferred_promise, MachineRepresentation::kTagged);
- VARIABLE(var_deferred_on_resolve, MachineRepresentation::kTagged);
- VARIABLE(var_deferred_on_reject, MachineRepresentation::kTagged);
-
- Branch(WordEqual(promise_fun, constructor), &fast_promise_capability,
- &promise_capability);
-
- BIND(&fast_promise_capability);
- {
- Node* const deferred_promise = AllocateAndInitJSPromise(context, promise);
- var_deferred_promise.Bind(deferred_promise);
- var_deferred_on_resolve.Bind(UndefinedConstant());
- var_deferred_on_reject.Bind(UndefinedConstant());
- Goto(&perform_promise_then);
- }
-
- BIND(&promise_capability);
- {
- Node* const capability = NewPromiseCapability(context, constructor);
- var_deferred_promise.Bind(
- LoadObjectField(capability, PromiseCapability::kPromiseOffset));
- var_deferred_on_resolve.Bind(
- LoadObjectField(capability, PromiseCapability::kResolveOffset));
- var_deferred_on_reject.Bind(
- LoadObjectField(capability, PromiseCapability::kRejectOffset));
- Goto(&perform_promise_then);
- }
-
- // 5. Return PerformPromiseThen(promise, onFulfilled, onRejected,
- // resultCapability).
- BIND(&perform_promise_then);
- Node* const result = InternalPerformPromiseThen(
- context, promise, on_resolve, on_reject, var_deferred_promise.value(),
- var_deferred_on_resolve.value(), var_deferred_on_reject.value());
- return result;
-}
-
-Node* PromiseBuiltinsAssembler::InternalPerformPromiseThen(
- Node* context, Node* promise, Node* on_resolve, Node* on_reject,
- Node* deferred_promise, Node* deferred_on_resolve,
- Node* deferred_on_reject) {
- VARIABLE(var_on_resolve, MachineRepresentation::kTagged);
- VARIABLE(var_on_reject, MachineRepresentation::kTagged);
-
- var_on_resolve.Bind(on_resolve);
- var_on_reject.Bind(on_reject);
-
- Label out(this), if_onresolvenotcallable(this), onrejectcheck(this),
- append_callbacks(this);
- GotoIf(TaggedIsSmi(on_resolve), &if_onresolvenotcallable);
-
- Branch(IsCallable(on_resolve), &onrejectcheck, &if_onresolvenotcallable);
-
- BIND(&if_onresolvenotcallable);
- {
- var_on_resolve.Bind(PromiseDefaultResolveHandlerSymbolConstant());
- Goto(&onrejectcheck);
- }
-
- BIND(&onrejectcheck);
+// ES #sec-performpromisethen
+void PromiseBuiltinsAssembler::PerformPromiseThen(
+ Node* context, Node* promise, Node* on_fulfilled, Node* on_rejected,
+ Node* result_promise_or_capability) {
+ CSA_ASSERT(this, TaggedIsNotSmi(promise));
+ CSA_ASSERT(this, IsJSPromise(promise));
+ CSA_ASSERT(this,
+ Word32Or(IsCallable(on_fulfilled), IsUndefined(on_fulfilled)));
+ CSA_ASSERT(this, Word32Or(IsCallable(on_rejected), IsUndefined(on_rejected)));
+ CSA_ASSERT(this, TaggedIsNotSmi(result_promise_or_capability));
+ CSA_ASSERT(this, Word32Or(IsJSPromise(result_promise_or_capability),
+ IsPromiseCapability(result_promise_or_capability)));
+
+ Label if_pending(this), if_notpending(this), done(this);
+ Node* const status = PromiseStatus(promise);
+ Branch(IsPromiseStatus(status, v8::Promise::kPending), &if_pending,
+ &if_notpending);
+
+ BIND(&if_pending);
{
- Label if_onrejectnotcallable(this);
- GotoIf(TaggedIsSmi(on_reject), &if_onrejectnotcallable);
-
- Branch(IsCallable(on_reject), &append_callbacks, &if_onrejectnotcallable);
-
- BIND(&if_onrejectnotcallable);
- {
- var_on_reject.Bind(PromiseDefaultRejectHandlerSymbolConstant());
- Goto(&append_callbacks);
- }
+ // The {promise} is still in "Pending" state, so we just record a new
+ // PromiseReaction holding both the onFulfilled and onRejected callbacks.
+ // Once the {promise} is resolved we decide on the concrete handler to
+ // push onto the microtask queue.
+ Node* const promise_reactions =
+ LoadObjectField(promise, JSPromise::kReactionsOrResultOffset);
+ Node* const reaction =
+ AllocatePromiseReaction(promise_reactions, result_promise_or_capability,
+ on_fulfilled, on_rejected);
+ StoreObjectField(promise, JSPromise::kReactionsOrResultOffset, reaction);
+ Goto(&done);
}
- BIND(&append_callbacks);
+ BIND(&if_notpending);
{
- Label fulfilled_check(this);
- Node* const status = PromiseStatus(promise);
- GotoIfNot(IsPromiseStatus(status, v8::Promise::kPending), &fulfilled_check);
-
- Node* const existing_deferred_promise =
- LoadObjectField(promise, JSPromise::kDeferredPromiseOffset);
-
- Label if_noexistingcallbacks(this), if_existingcallbacks(this);
- Branch(IsUndefined(existing_deferred_promise), &if_noexistingcallbacks,
- &if_existingcallbacks);
-
- BIND(&if_noexistingcallbacks);
+ VARIABLE(var_map, MachineRepresentation::kTagged);
+ VARIABLE(var_handler, MachineRepresentation::kTagged);
+ Label if_fulfilled(this), if_rejected(this, Label::kDeferred),
+ enqueue(this);
+ Branch(IsPromiseStatus(status, v8::Promise::kFulfilled), &if_fulfilled,
+ &if_rejected);
+
+ BIND(&if_fulfilled);
{
- // Store callbacks directly in the slots.
- StoreObjectField(promise, JSPromise::kDeferredPromiseOffset,
- deferred_promise);
- StoreObjectField(promise, JSPromise::kDeferredOnResolveOffset,
- deferred_on_resolve);
- StoreObjectField(promise, JSPromise::kDeferredOnRejectOffset,
- deferred_on_reject);
- StoreObjectField(promise, JSPromise::kFulfillReactionsOffset,
- var_on_resolve.value());
- StoreObjectField(promise, JSPromise::kRejectReactionsOffset,
- var_on_reject.value());
- Goto(&out);
+ var_map.Bind(LoadRoot(Heap::kPromiseFulfillReactionJobTaskMapRootIndex));
+ var_handler.Bind(on_fulfilled);
+ Goto(&enqueue);
}
- BIND(&if_existingcallbacks);
+ BIND(&if_rejected);
{
- Label if_singlecallback(this), if_multiplecallbacks(this);
- BranchIfJSObject(existing_deferred_promise, &if_singlecallback,
- &if_multiplecallbacks);
-
- BIND(&if_singlecallback);
- {
- // Create new FixedArrays to store callbacks, and migrate
- // existing callbacks.
- Node* const deferred_promise_arr =
- AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
- StoreFixedArrayElement(deferred_promise_arr, 0,
- existing_deferred_promise);
- StoreFixedArrayElement(deferred_promise_arr, 1, deferred_promise);
-
- Node* const deferred_on_resolve_arr =
- AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
- StoreFixedArrayElement(
- deferred_on_resolve_arr, 0,
- LoadObjectField(promise, JSPromise::kDeferredOnResolveOffset));
- StoreFixedArrayElement(deferred_on_resolve_arr, 1, deferred_on_resolve);
-
- Node* const deferred_on_reject_arr =
- AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
- StoreFixedArrayElement(
- deferred_on_reject_arr, 0,
- LoadObjectField(promise, JSPromise::kDeferredOnRejectOffset));
- StoreFixedArrayElement(deferred_on_reject_arr, 1, deferred_on_reject);
-
- Node* const fulfill_reactions =
- AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
- StoreFixedArrayElement(
- fulfill_reactions, 0,
- LoadObjectField(promise, JSPromise::kFulfillReactionsOffset));
- StoreFixedArrayElement(fulfill_reactions, 1, var_on_resolve.value());
-
- Node* const reject_reactions =
- AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
- StoreFixedArrayElement(
- reject_reactions, 0,
- LoadObjectField(promise, JSPromise::kRejectReactionsOffset));
- StoreFixedArrayElement(reject_reactions, 1, var_on_reject.value());
-
- // Store new FixedArrays in promise.
- StoreObjectField(promise, JSPromise::kDeferredPromiseOffset,
- deferred_promise_arr);
- StoreObjectField(promise, JSPromise::kDeferredOnResolveOffset,
- deferred_on_resolve_arr);
- StoreObjectField(promise, JSPromise::kDeferredOnRejectOffset,
- deferred_on_reject_arr);
- StoreObjectField(promise, JSPromise::kFulfillReactionsOffset,
- fulfill_reactions);
- StoreObjectField(promise, JSPromise::kRejectReactionsOffset,
- reject_reactions);
- Goto(&out);
- }
-
- BIND(&if_multiplecallbacks);
- {
- AppendPromiseCallback(JSPromise::kDeferredPromiseOffset, promise,
- deferred_promise);
- AppendPromiseCallback(JSPromise::kDeferredOnResolveOffset, promise,
- deferred_on_resolve);
- AppendPromiseCallback(JSPromise::kDeferredOnRejectOffset, promise,
- deferred_on_reject);
- AppendPromiseCallback(JSPromise::kFulfillReactionsOffset, promise,
- var_on_resolve.value());
- AppendPromiseCallback(JSPromise::kRejectReactionsOffset, promise,
- var_on_reject.value());
- Goto(&out);
- }
+ CSA_ASSERT(this, IsPromiseStatus(status, v8::Promise::kRejected));
+ var_map.Bind(LoadRoot(Heap::kPromiseRejectReactionJobTaskMapRootIndex));
+ var_handler.Bind(on_rejected);
+ GotoIf(PromiseHasHandler(promise), &enqueue);
+ CallRuntime(Runtime::kPromiseRevokeReject, context, promise);
+ Goto(&enqueue);
}
- BIND(&fulfilled_check);
- {
- Label reject(this);
- Node* const result = LoadObjectField(promise, JSPromise::kResultOffset);
- GotoIfNot(IsPromiseStatus(status, v8::Promise::kFulfilled), &reject);
-
- Node* info = AllocatePromiseReactionJobInfo(
- result, var_on_resolve.value(), deferred_promise, deferred_on_resolve,
- deferred_on_reject, context);
- CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), info);
- Goto(&out);
-
- BIND(&reject);
- {
- CSA_ASSERT(this, IsPromiseStatus(status, v8::Promise::kRejected));
- Node* const has_handler = PromiseHasHandler(promise);
- Label enqueue(this);
-
- // TODO(gsathya): Fold these runtime calls and move to TF.
- GotoIf(has_handler, &enqueue);
- CallRuntime(Runtime::kPromiseRevokeReject, context, promise);
- Goto(&enqueue);
-
- BIND(&enqueue);
- {
- Node* info = AllocatePromiseReactionJobInfo(
- result, var_on_reject.value(), deferred_promise,
- deferred_on_resolve, deferred_on_reject, context);
- CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), info);
- Goto(&out);
- }
- }
- }
+ BIND(&enqueue);
+ Node* argument =
+ LoadObjectField(promise, JSPromise::kReactionsOrResultOffset);
+ Node* microtask = AllocatePromiseReactionJobTask(
+ var_map.value(), context, argument, var_handler.value(),
+ result_promise_or_capability);
+ CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), microtask);
+ Goto(&done);
}
- BIND(&out);
+ BIND(&done);
PromiseSetHasHandler(promise);
- return deferred_promise;
}
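In prose, the new PerformPromiseThen does one of two things: while the promise is pending it merely records a PromiseReaction (prepending it to the reactions list), and once the promise has settled it picks the matching handler and enqueues a job directly. A hypothetical standalone C++ model of that split follows; ModelPromise, Reaction and PerformThen are illustrative names only, not V8 types.

#include <deque>
#include <functional>

enum class Status { kPending, kFulfilled, kRejected };

struct Reaction {
  Reaction* next;                      // singly linked, newest first
  std::function<void()> on_fulfilled;
  std::function<void()> on_rejected;
};

struct ModelPromise {
  Status status = Status::kPending;
  Reaction* reactions = nullptr;  // only meaningful while pending
};

void PerformThen(ModelPromise* promise, Reaction* reaction,
                 std::deque<std::function<void()>>* microtask_queue) {
  if (promise->status == Status::kPending) {
    // Still pending: just record the reaction; the concrete handler is
    // chosen later, when the promise settles.
    reaction->next = promise->reactions;
    promise->reactions = reaction;
  } else {
    // Already settled: pick the matching handler and enqueue it directly.
    auto handler = (promise->status == Status::kFulfilled)
                       ? reaction->on_fulfilled
                       : reaction->on_rejected;
    microtask_queue->push_back(handler);
  }
}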
-// Promise fast path implementations rely on unmodified JSPromise instances.
-// We use a fairly coarse granularity for this and simply check whether both
-// the promise itself is unmodified (i.e. its map has not changed) and its
-// prototype is unmodified.
-// TODO(gsathya): Refactor this out to prevent code dupe with builtins-regexp
-void PromiseBuiltinsAssembler::BranchIfFastPath(Node* context, Node* promise,
- Label* if_isunmodified,
- Label* if_ismodified) {
- Node* const native_context = LoadNativeContext(context);
- Node* const promise_fun =
- LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
- BranchIfFastPath(native_context, promise_fun, promise, if_isunmodified,
- if_ismodified);
-}
-
-void PromiseBuiltinsAssembler::BranchIfFastPath(Node* native_context,
- Node* promise_fun,
- Node* promise,
- Label* if_isunmodified,
- Label* if_ismodified) {
- CSA_ASSERT(this, IsNativeContext(native_context));
- CSA_ASSERT(this,
- WordEqual(promise_fun,
- LoadContextElement(native_context,
- Context::PROMISE_FUNCTION_INDEX)));
-
- GotoIfForceSlowPath(if_ismodified);
-
- Node* const map = LoadMap(promise);
- Node* const initial_map =
- LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset);
- Node* const has_initialmap = WordEqual(map, initial_map);
-
- GotoIfNot(has_initialmap, if_ismodified);
+// ES #sec-performpromisethen
+TF_BUILTIN(PerformPromiseThen, PromiseBuiltinsAssembler) {
+ Node* const context = Parameter(Descriptor::kContext);
+ Node* const promise = Parameter(Descriptor::kPromise);
+ Node* const on_fulfilled = Parameter(Descriptor::kOnFulfilled);
+ Node* const on_rejected = Parameter(Descriptor::kOnRejected);
+ Node* const result_promise = Parameter(Descriptor::kResultPromise);
- Node* const initial_proto_initial_map =
- LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_MAP_INDEX);
- Node* const proto_map = LoadMap(CAST(LoadMapPrototype(map)));
- Node* const proto_has_initialmap =
- WordEqual(proto_map, initial_proto_initial_map);
+ CSA_ASSERT(this, TaggedIsNotSmi(result_promise));
+ CSA_ASSERT(this, IsJSPromise(result_promise));
- Branch(proto_has_initialmap, if_isunmodified, if_ismodified);
+ PerformPromiseThen(context, promise, on_fulfilled, on_rejected,
+ result_promise);
+ Return(result_promise);
}
-Node* PromiseBuiltinsAssembler::AllocatePromiseResolveThenableJobInfo(
- Node* thenable, Node* then, Node* resolve, Node* reject, Node* context) {
- Node* const info = Allocate(PromiseResolveThenableJobInfo::kSize);
- StoreMapNoWriteBarrier(info,
- Heap::kPromiseResolveThenableJobInfoMapRootIndex);
+Node* PromiseBuiltinsAssembler::AllocatePromiseReaction(Node* next,
+ Node* payload,
+ Node* fulfill_handler,
+ Node* reject_handler) {
+ Node* const reaction = Allocate(PromiseReaction::kSize);
+ StoreMapNoWriteBarrier(reaction, Heap::kPromiseReactionMapRootIndex);
+ StoreObjectFieldNoWriteBarrier(reaction, PromiseReaction::kNextOffset, next);
+ StoreObjectFieldNoWriteBarrier(reaction, PromiseReaction::kPayloadOffset,
+ payload);
+ StoreObjectFieldNoWriteBarrier(
+ reaction, PromiseReaction::kFulfillHandlerOffset, fulfill_handler);
StoreObjectFieldNoWriteBarrier(
- info, PromiseResolveThenableJobInfo::kThenableOffset, thenable);
+ reaction, PromiseReaction::kRejectHandlerOffset, reject_handler);
+ return reaction;
+}
+
+Node* PromiseBuiltinsAssembler::AllocatePromiseReactionJobTask(
+ Node* map, Node* context, Node* argument, Node* handler, Node* payload) {
+ Node* const microtask = Allocate(PromiseReactionJobTask::kSize);
+ StoreMapNoWriteBarrier(microtask, map);
StoreObjectFieldNoWriteBarrier(
- info, PromiseResolveThenableJobInfo::kThenOffset, then);
+ microtask, PromiseReactionJobTask::kArgumentOffset, argument);
StoreObjectFieldNoWriteBarrier(
- info, PromiseResolveThenableJobInfo::kResolveOffset, resolve);
+ microtask, PromiseReactionJobTask::kContextOffset, context);
StoreObjectFieldNoWriteBarrier(
- info, PromiseResolveThenableJobInfo::kRejectOffset, reject);
+ microtask, PromiseReactionJobTask::kHandlerOffset, handler);
StoreObjectFieldNoWriteBarrier(
- info, PromiseResolveThenableJobInfo::kContextOffset, context);
- return info;
+ microtask, PromiseReactionJobTask::kPayloadOffset, payload);
+ return microtask;
}
-void PromiseBuiltinsAssembler::InternalResolvePromise(Node* context,
- Node* promise,
- Node* result) {
- Isolate* isolate = this->isolate();
-
- VARIABLE(var_reason, MachineRepresentation::kTagged);
- VARIABLE(var_then, MachineRepresentation::kTagged);
-
- Label do_enqueue(this), fulfill(this), if_nocycle(this),
- if_cycle(this, Label::kDeferred),
- if_rejectpromise(this, Label::kDeferred), out(this);
-
- Label cycle_check(this);
- GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &cycle_check);
- CallRuntime(Runtime::kPromiseHookResolve, context, promise);
- Goto(&cycle_check);
-
- BIND(&cycle_check);
- // 6. If SameValue(resolution, promise) is true, then
- BranchIfSameValue(promise, result, &if_cycle, &if_nocycle);
- BIND(&if_nocycle);
-
- // 7. If Type(resolution) is not Object, then
- GotoIf(TaggedIsSmi(result), &fulfill);
- GotoIfNot(IsJSReceiver(result), &fulfill);
+Node* PromiseBuiltinsAssembler::AllocatePromiseReactionJobTask(
+ Heap::RootListIndex map_root_index, Node* context, Node* argument,
+ Node* handler, Node* payload) {
+ DCHECK(map_root_index == Heap::kPromiseFulfillReactionJobTaskMapRootIndex ||
+ map_root_index == Heap::kPromiseRejectReactionJobTaskMapRootIndex);
+ Node* const map = LoadRoot(map_root_index);
+ return AllocatePromiseReactionJobTask(map, context, argument, handler,
+ payload);
+}
- Label if_nativepromise(this), if_notnativepromise(this, Label::kDeferred);
- Node* const native_context = LoadNativeContext(context);
- Node* const promise_fun =
- LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
- BranchIfFastPath(native_context, promise_fun, result, &if_nativepromise,
- &if_notnativepromise);
+Node* PromiseBuiltinsAssembler::AllocatePromiseResolveThenableJobTask(
+ Node* promise_to_resolve, Node* then, Node* thenable, Node* context) {
+ Node* const microtask = Allocate(PromiseResolveThenableJobTask::kSize);
+ StoreMapNoWriteBarrier(microtask,
+ Heap::kPromiseResolveThenableJobTaskMapRootIndex);
+ StoreObjectFieldNoWriteBarrier(
+ microtask, PromiseResolveThenableJobTask::kContextOffset, context);
+ StoreObjectFieldNoWriteBarrier(
+ microtask, PromiseResolveThenableJobTask::kPromiseToResolveOffset,
+ promise_to_resolve);
+ StoreObjectFieldNoWriteBarrier(
+ microtask, PromiseResolveThenableJobTask::kThenOffset, then);
+ StoreObjectFieldNoWriteBarrier(
+ microtask, PromiseResolveThenableJobTask::kThenableOffset, thenable);
+ return microtask;
+}
- // Resolution is a native promise and if it's already resolved or
- // rejected, shortcircuit the resolution procedure by directly
- // reusing the value from the promise.
- BIND(&if_nativepromise);
+// ES #sec-triggerpromisereactions
+Node* PromiseBuiltinsAssembler::TriggerPromiseReactions(
+ Node* context, Node* reactions, Node* argument,
+ PromiseReaction::Type type) {
+ // We need to reverse the {reactions} here, since we record them on the
+ // JSPromise in the reverse order.
{
- Node* const thenable_status = PromiseStatus(result);
- Node* const thenable_value =
- LoadObjectField(result, JSPromise::kResultOffset);
+ VARIABLE(var_current, MachineRepresentation::kTagged, reactions);
+ VARIABLE(var_reversed, MachineRepresentation::kTagged,
+ SmiConstant(Smi::kZero));
- Label if_isnotpending(this);
- GotoIfNot(IsPromiseStatus(thenable_status, v8::Promise::kPending),
- &if_isnotpending);
-
- // TODO(gsathya): Use a marker here instead of the actual then
- // callback, and check for the marker in PromiseResolveThenableJob
- // and perform PromiseThen.
- Node* const then =
- LoadContextElement(native_context, Context::PROMISE_THEN_INDEX);
- var_then.Bind(then);
- Goto(&do_enqueue);
-
- BIND(&if_isnotpending);
+ Label loop(this, {&var_current, &var_reversed}), done_loop(this);
+ Goto(&loop);
+ BIND(&loop);
{
- Label if_fulfilled(this), if_rejected(this);
- Branch(IsPromiseStatus(thenable_status, v8::Promise::kFulfilled),
- &if_fulfilled, &if_rejected);
-
- BIND(&if_fulfilled);
- {
- PromiseFulfill(context, promise, thenable_value,
- v8::Promise::kFulfilled);
- PromiseSetHasHandler(promise);
- Goto(&out);
- }
-
- BIND(&if_rejected);
- {
- Label reject(this);
- Node* const has_handler = PromiseHasHandler(result);
-
- // Promise has already been rejected, but had no handler.
- // Revoke previously triggered reject event.
- GotoIf(has_handler, &reject);
- CallRuntime(Runtime::kPromiseRevokeReject, context, result);
- Goto(&reject);
-
- BIND(&reject);
- // Don't cause a debug event as this case is forwarding a rejection.
- InternalPromiseReject(context, promise, thenable_value, false);
- PromiseSetHasHandler(result);
- Goto(&out);
- }
+ Node* current = var_current.value();
+ GotoIf(TaggedIsSmi(current), &done_loop);
+ var_current.Bind(LoadObjectField(current, PromiseReaction::kNextOffset));
+ StoreObjectField(current, PromiseReaction::kNextOffset,
+ var_reversed.value());
+ var_reversed.Bind(current);
+ Goto(&loop);
}
+ BIND(&done_loop);
+ reactions = var_reversed.value();
}
- BIND(&if_notnativepromise);
+ // Morph the {reactions} into PromiseReactionJobTasks and push them
+ // onto the microtask queue.
{
- // 8. Let then be Get(resolution, "then").
- Node* const then =
- GetProperty(context, result, isolate->factory()->then_string());
-
- // 9. If then is an abrupt completion, then
- GotoIfException(then, &if_rejectpromise, &var_reason);
+ VARIABLE(var_current, MachineRepresentation::kTagged, reactions);
- // 11. If IsCallable(thenAction) is false, then
- GotoIf(TaggedIsSmi(then), &fulfill);
- Node* const then_map = LoadMap(then);
- GotoIfNot(IsCallableMap(then_map), &fulfill);
- var_then.Bind(then);
- Goto(&do_enqueue);
+ Label loop(this, {&var_current}), done_loop(this);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ Node* current = var_current.value();
+ GotoIf(TaggedIsSmi(current), &done_loop);
+ var_current.Bind(LoadObjectField(current, PromiseReaction::kNextOffset));
+
+ // Morph {current} from a PromiseReaction into a PromiseReactionJobTask
+ // and schedule that on the microtask queue. We try to minimize the number
+ // of stores here to avoid screwing up the store buffer.
+ STATIC_ASSERT(PromiseReaction::kSize == PromiseReactionJobTask::kSize);
+ if (type == PromiseReaction::kFulfill) {
+ StoreMapNoWriteBarrier(
+ current, Heap::kPromiseFulfillReactionJobTaskMapRootIndex);
+ StoreObjectField(current, PromiseReactionJobTask::kArgumentOffset,
+ argument);
+ StoreObjectField(current, PromiseReactionJobTask::kContextOffset,
+ context);
+ STATIC_ASSERT(PromiseReaction::kFulfillHandlerOffset ==
+ PromiseReactionJobTask::kHandlerOffset);
+ STATIC_ASSERT(PromiseReaction::kPayloadOffset ==
+ PromiseReactionJobTask::kPayloadOffset);
+ } else {
+ Node* handler =
+ LoadObjectField(current, PromiseReaction::kRejectHandlerOffset);
+ StoreMapNoWriteBarrier(current,
+ Heap::kPromiseRejectReactionJobTaskMapRootIndex);
+ StoreObjectField(current, PromiseReactionJobTask::kArgumentOffset,
+ argument);
+ StoreObjectField(current, PromiseReactionJobTask::kContextOffset,
+ context);
+ StoreObjectField(current, PromiseReactionJobTask::kHandlerOffset,
+ handler);
+ STATIC_ASSERT(PromiseReaction::kPayloadOffset ==
+ PromiseReactionJobTask::kPayloadOffset);
+ }
+ CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), current);
+ Goto(&loop);
+ }
+ BIND(&done_loop);
}
- BIND(&do_enqueue);
- {
- // TODO(gsathya): Add fast path for native promises with unmodified
- // PromiseThen (which don't need these resolving functions, but
- // instead can just call resolve/reject directly).
- Node* resolve = nullptr;
- Node* reject = nullptr;
- std::tie(resolve, reject) = CreatePromiseResolvingFunctions(
- promise, FalseConstant(), native_context);
-
- Node* const info = AllocatePromiseResolveThenableJobInfo(
- result, var_then.value(), resolve, reject, context);
-
- Label enqueue(this);
- GotoIfNot(IsDebugActive(), &enqueue);
-
- GotoIf(TaggedIsSmi(result), &enqueue);
- GotoIfNot(HasInstanceType(result, JS_PROMISE_TYPE), &enqueue);
-
- // Mark the dependency of the new promise on the resolution
- Node* const key =
- HeapConstant(isolate->factory()->promise_handled_by_symbol());
- CallRuntime(Runtime::kSetProperty, context, result, key, promise,
- SmiConstant(LanguageMode::kStrict));
- Goto(&enqueue);
-
- // 12. Perform EnqueueJob("PromiseJobs",
- // PromiseResolveThenableJob, « promise, resolution, thenAction»).
- BIND(&enqueue);
- CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), info);
- Goto(&out);
- }
+ return UndefinedConstant();
+}
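
The STATIC_ASSERTs in the morph loop above are what make the in-place conversion legal: PromiseReaction and PromiseReactionJobTask have the same size and keep the handler and payload fields at identical offsets, so only the map, argument and context need stores. Roughly the same invariant, expressed with hypothetical C++ layouts (these are not V8's actual classes):

    #include <cstddef>

    struct PromiseReactionLike {         // the queued reaction
      void* map;
      void* next;
      void* reject_handler;
      void* fulfill_handler;             // reused as the job task's handler
      void* payload;
    };

    struct PromiseReactionJobTaskLike {  // what it is morphed into
      void* map;
      void* argument;
      void* context;
      void* handler;
      void* payload;
    };

    static_assert(sizeof(PromiseReactionLike) ==
                      sizeof(PromiseReactionJobTaskLike),
                  "same size, so the object can be retyped in place");
    static_assert(offsetof(PromiseReactionLike, fulfill_handler) ==
                      offsetof(PromiseReactionJobTaskLike, handler),
                  "fulfill handler already sits in the handler slot");
    static_assert(offsetof(PromiseReactionLike, payload) ==
                      offsetof(PromiseReactionJobTaskLike, payload),
                  "payload needs no store either");
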
- // 7.b Return FulfillPromise(promise, resolution).
- BIND(&fulfill);
- {
- PromiseFulfill(context, promise, result, v8::Promise::kFulfilled);
- Goto(&out);
- }
+template <typename... TArgs>
+Node* PromiseBuiltinsAssembler::InvokeThen(Node* native_context, Node* receiver,
+ TArgs... args) {
+ CSA_ASSERT(this, IsNativeContext(native_context));
- BIND(&if_cycle);
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+ Label if_fast(this), if_slow(this, Label::kDeferred), done(this, &var_result);
+ GotoIf(TaggedIsSmi(receiver), &if_slow);
+ Node* const receiver_map = LoadMap(receiver);
+  // We can skip the "then" lookup on {receiver} if its [[Prototype]]
+ // is the (initial) Promise.prototype and the Promise#then protector
+ // is intact, as that guards the lookup path for the "then" property
+ // on JSPromise instances which have the (initial) %PromisePrototype%.
+ BranchIfPromiseThenLookupChainIntact(native_context, receiver_map, &if_fast,
+ &if_slow);
+
+ BIND(&if_fast);
{
- // 6.a Let selfResolutionError be a newly created TypeError object.
- Node* const message_id = SmiConstant(MessageTemplate::kPromiseCyclic);
- Node* const error =
- CallRuntime(Runtime::kNewTypeError, context, message_id, result);
- var_reason.Bind(error);
-
- // 6.b Return RejectPromise(promise, selfResolutionError).
- Goto(&if_rejectpromise);
+ Node* const then =
+ LoadContextElement(native_context, Context::PROMISE_THEN_INDEX);
+ Node* const result =
+ CallJS(CodeFactory::CallFunction(
+ isolate(), ConvertReceiverMode::kNotNullOrUndefined),
+ native_context, then, receiver, args...);
+ var_result.Bind(result);
+ Goto(&done);
}
- // 9.a Return RejectPromise(promise, then.[[Value]]).
- BIND(&if_rejectpromise);
+ BIND(&if_slow);
{
- // Don't cause a debug event as this case is forwarding a rejection.
- InternalPromiseReject(context, promise, var_reason.value(), false);
- Goto(&out);
+ Node* const then = GetProperty(native_context, receiver,
+ isolate()->factory()->then_string());
+ Node* const result = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
+ native_context, then, receiver, args...);
+ var_result.Bind(result);
+ Goto(&done);
}
- BIND(&out);
+ BIND(&done);
+ return var_result.value();
}
-void PromiseBuiltinsAssembler::PromiseFulfill(
- Node* context, Node* promise, Node* result,
- v8::Promise::PromiseState status) {
- Label do_promisereset(this);
-
- Node* const deferred_promise =
- LoadObjectField(promise, JSPromise::kDeferredPromiseOffset);
-
- GotoIf(IsUndefined(deferred_promise), &do_promisereset);
-
- Node* const tasks =
- status == v8::Promise::kFulfilled
- ? LoadObjectField(promise, JSPromise::kFulfillReactionsOffset)
- : LoadObjectField(promise, JSPromise::kRejectReactionsOffset);
-
- Node* const deferred_on_resolve =
- LoadObjectField(promise, JSPromise::kDeferredOnResolveOffset);
- Node* const deferred_on_reject =
- LoadObjectField(promise, JSPromise::kDeferredOnRejectOffset);
+void PromiseBuiltinsAssembler::BranchIfPromiseSpeciesLookupChainIntact(
+ Node* native_context, Node* promise_map, Label* if_fast, Label* if_slow) {
+ CSA_ASSERT(this, IsNativeContext(native_context));
+ CSA_ASSERT(this, IsJSPromiseMap(promise_map));
- Node* const info = AllocatePromiseReactionJobInfo(
- result, tasks, deferred_promise, deferred_on_resolve, deferred_on_reject,
- context);
+ Node* const promise_prototype =
+ LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_INDEX);
+ GotoIfForceSlowPath(if_slow);
+ GotoIfNot(WordEqual(LoadMapPrototype(promise_map), promise_prototype),
+ if_slow);
+ Branch(IsSpeciesProtectorCellInvalid(), if_slow, if_fast);
+}
- CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), info);
- Goto(&do_promisereset);
+void PromiseBuiltinsAssembler::BranchIfPromiseThenLookupChainIntact(
+ Node* native_context, Node* receiver_map, Label* if_fast, Label* if_slow) {
+ CSA_ASSERT(this, IsMap(receiver_map));
+ CSA_ASSERT(this, IsNativeContext(native_context));
- BIND(&do_promisereset);
- {
- PromiseSetStatus(promise, status);
- StoreObjectField(promise, JSPromise::kResultOffset, result);
- StoreObjectFieldRoot(promise, JSPromise::kDeferredPromiseOffset,
- Heap::kUndefinedValueRootIndex);
- StoreObjectFieldRoot(promise, JSPromise::kDeferredOnResolveOffset,
- Heap::kUndefinedValueRootIndex);
- StoreObjectFieldRoot(promise, JSPromise::kDeferredOnRejectOffset,
- Heap::kUndefinedValueRootIndex);
- StoreObjectFieldRoot(promise, JSPromise::kFulfillReactionsOffset,
- Heap::kUndefinedValueRootIndex);
- StoreObjectFieldRoot(promise, JSPromise::kRejectReactionsOffset,
- Heap::kUndefinedValueRootIndex);
- }
+ GotoIfForceSlowPath(if_slow);
+ GotoIfNot(IsJSPromiseMap(receiver_map), if_slow);
+ Node* const promise_prototype =
+ LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_INDEX);
+ GotoIfNot(WordEqual(LoadMapPrototype(receiver_map), promise_prototype),
+ if_slow);
+ Branch(IsPromiseThenProtectorCellInvalid(), if_slow, if_fast);
}
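
Both Branch helpers (and InvokeThen's fast path above) boil down to the same two-part guard: the map's prototype must still be the original Promise.prototype, and the corresponding protector cell must not have been invalidated by someone patching that prototype. A rough C++ sketch of the check, with illustrative names only:

    struct MapLike {
      const void* prototype;  // what LoadMapPrototype(receiver_map) reads
    };

    // The protector flag is process-wide state that V8 flips the first time
    // Promise.prototype.then (or @@species) is modified; here it is modeled
    // as a plain bool passed in by the caller.
    bool ThenLookupChainIntact(const MapLike& receiver_map,
                               const void* initial_promise_prototype,
                               bool then_protector_valid) {
      if (receiver_map.prototype != initial_promise_prototype) return false;
      return then_protector_valid;
    }
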
void PromiseBuiltinsAssembler::BranchIfAccessCheckFailed(
@@ -878,43 +598,6 @@ void PromiseBuiltinsAssembler::BranchIfAccessCheckFailed(
BIND(&has_access);
}
-void PromiseBuiltinsAssembler::InternalPromiseReject(Node* context,
- Node* promise, Node* value,
- Node* debug_event) {
- Label out(this);
- GotoIfNot(IsDebugActive(), &out);
- GotoIfNot(WordEqual(TrueConstant(), debug_event), &out);
- CallRuntime(Runtime::kDebugPromiseReject, context, promise, value);
- Goto(&out);
-
- BIND(&out);
- InternalPromiseReject(context, promise, value, false);
-}
-
-// This duplicates a lot of logic from PromiseRejectEvent in
-// runtime-promise.cc
-void PromiseBuiltinsAssembler::InternalPromiseReject(Node* context,
- Node* promise, Node* value,
- bool debug_event) {
- Label fulfill(this), exit(this);
-
- GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &fulfill);
- if (debug_event) {
- CallRuntime(Runtime::kDebugPromiseReject, context, promise, value);
- }
- CallRuntime(Runtime::kPromiseHookResolve, context, promise);
- Goto(&fulfill);
-
- BIND(&fulfill);
- PromiseFulfill(context, promise, value, v8::Promise::kRejected);
-
- GotoIf(PromiseHasHandler(promise), &exit);
- CallRuntime(Runtime::kReportPromiseReject, context, promise, value);
- Goto(&exit);
-
- BIND(&exit);
-}
-
void PromiseBuiltinsAssembler::SetForwardingHandlerIfTrue(
Node* context, Node* condition, const NodeGenerator& object) {
Label done(this);
@@ -940,40 +623,52 @@ void PromiseBuiltinsAssembler::SetPromiseHandledByIfTrue(
BIND(&done);
}
-void PromiseBuiltinsAssembler::PerformFulfillClosure(Node* context, Node* value,
- bool should_resolve) {
- Label out(this);
+// ES #sec-promise-reject-functions
+TF_BUILTIN(PromiseCapabilityDefaultReject, PromiseBuiltinsAssembler) {
+ Node* const reason = Parameter(Descriptor::kReason);
+ Node* const context = Parameter(Descriptor::kContext);
// 2. Let promise be F.[[Promise]].
- Node* const promise_slot = IntPtrConstant(kPromiseSlot);
- Node* const promise = LoadContextElement(context, promise_slot);
-
- // We use `undefined` as a marker to know that this callback was
- // already called.
- GotoIf(IsUndefined(promise), &out);
+ Node* const promise = LoadContextElement(context, kPromiseSlot);
- if (should_resolve) {
- InternalResolvePromise(context, promise, value);
- } else {
- Node* const debug_event =
- LoadContextElement(context, IntPtrConstant(kDebugEventSlot));
- InternalPromiseReject(context, promise, value, debug_event);
- }
+ // 3. Let alreadyResolved be F.[[AlreadyResolved]].
+ // 4. If alreadyResolved.[[Value]] is true, return undefined.
+ // We use undefined as a marker for the [[AlreadyResolved]] state.
+ ReturnIf(IsUndefined(promise), UndefinedConstant());
- StoreContextElement(context, promise_slot, UndefinedConstant());
- Goto(&out);
+ // 5. Set alreadyResolved.[[Value]] to true.
+ StoreContextElementNoWriteBarrier(context, kPromiseSlot, UndefinedConstant());
- BIND(&out);
+ // 6. Return RejectPromise(promise, reason).
+ Node* const debug_event = LoadContextElement(context, kDebugEventSlot);
+ Return(CallBuiltin(Builtins::kRejectPromise, context, promise, reason,
+ debug_event));
}
-// ES#sec-promise-reject-functions
-// Promise Reject Functions
-TF_BUILTIN(PromiseRejectClosure, PromiseBuiltinsAssembler) {
- Node* const value = Parameter(Descriptor::kValue);
+// ES #sec-promise-resolve-functions
+TF_BUILTIN(PromiseCapabilityDefaultResolve, PromiseBuiltinsAssembler) {
+ Node* const resolution = Parameter(Descriptor::kResolution);
Node* const context = Parameter(Descriptor::kContext);
- PerformFulfillClosure(context, value, false);
- Return(UndefinedConstant());
+ // 2. Let promise be F.[[Promise]].
+ Node* const promise = LoadContextElement(context, kPromiseSlot);
+
+ // 3. Let alreadyResolved be F.[[AlreadyResolved]].
+ // 4. If alreadyResolved.[[Value]] is true, return undefined.
+ // We use undefined as a marker for the [[AlreadyResolved]] state.
+ ReturnIf(IsUndefined(promise), UndefinedConstant());
+
+ // 5. Set alreadyResolved.[[Value]] to true.
+ StoreContextElementNoWriteBarrier(context, kPromiseSlot, UndefinedConstant());
+
+ // The rest of the logic (and the catch prediction) is
+ // encapsulated in the dedicated ResolvePromise builtin.
+ Return(CallBuiltin(Builtins::kResolvePromise, context, promise, resolution));
+}
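
Both default capability functions implement the [[AlreadyResolved]] record by recycling the promise context slot itself: the first call overwrites it with undefined, and every later call sees undefined and returns early. A plain C++ analogue of that one-shot guard (the types are illustrative, not V8's):

    #include <functional>
    #include <utility>

    class OneShotResolver {
     public:
      explicit OneShotResolver(std::function<void(int)> target)
          : target_(std::move(target)) {}

      void Resolve(int value) {
        if (!target_) return;            // ReturnIf(IsUndefined(promise), ...)
        auto target = std::move(target_);
        target_ = nullptr;               // kPromiseSlot := undefined
        target(value);                   // CallBuiltin(kResolvePromise, ...)
      }

     private:
      std::function<void(int)> target_;  // doubles as the already-resolved flag
    };
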
+
+TF_BUILTIN(PromiseConstructorLazyDeoptContinuation, PromiseBuiltinsAssembler) {
+ Node* promise = Parameter(Descriptor::kPromise);
+ Return(promise);
}
// ES6 #sec-promise-executor
@@ -1089,231 +784,357 @@ TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) {
}
}
+// V8 Extras: v8.createPromise(parent)
TF_BUILTIN(PromiseInternalConstructor, PromiseBuiltinsAssembler) {
Node* const parent = Parameter(Descriptor::kParent);
Node* const context = Parameter(Descriptor::kContext);
Return(AllocateAndInitJSPromise(context, parent));
}
+// V8 Extras: v8.rejectPromise(promise, reason)
+TF_BUILTIN(PromiseInternalReject, PromiseBuiltinsAssembler) {
+ Node* const promise = Parameter(Descriptor::kPromise);
+ Node* const reason = Parameter(Descriptor::kReason);
+ Node* const context = Parameter(Descriptor::kContext);
+ // We pass true to trigger the debugger's on exception handler.
+ Return(CallBuiltin(Builtins::kRejectPromise, context, promise, reason,
+ TrueConstant()));
+}
+
+// V8 Extras: v8.resolvePromise(promise, resolution)
+TF_BUILTIN(PromiseInternalResolve, PromiseBuiltinsAssembler) {
+ Node* const promise = Parameter(Descriptor::kPromise);
+ Node* const resolution = Parameter(Descriptor::kResolution);
+ Node* const context = Parameter(Descriptor::kContext);
+ Return(CallBuiltin(Builtins::kResolvePromise, context, promise, resolution));
+}
+
// ES#sec-promise.prototype.then
// Promise.prototype.then ( onFulfilled, onRejected )
TF_BUILTIN(PromisePrototypeThen, PromiseBuiltinsAssembler) {
// 1. Let promise be the this value.
Node* const promise = Parameter(Descriptor::kReceiver);
- Node* const on_resolve = Parameter(Descriptor::kOnFullfilled);
- Node* const on_reject = Parameter(Descriptor::kOnRejected);
+ Node* const on_fulfilled = Parameter(Descriptor::kOnFulfilled);
+ Node* const on_rejected = Parameter(Descriptor::kOnRejected);
Node* const context = Parameter(Descriptor::kContext);
- Node* const result =
- InternalPromiseThen(context, promise, on_resolve, on_reject);
- Return(result);
-}
+ // 2. If IsPromise(promise) is false, throw a TypeError exception.
+ ThrowIfNotInstanceType(context, promise, JS_PROMISE_TYPE,
+ "Promise.prototype.then");
-// ES#sec-promise-resolve-functions
-// Promise Resolve Functions
-TF_BUILTIN(PromiseResolveClosure, PromiseBuiltinsAssembler) {
- Node* const value = Parameter(Descriptor::kValue);
- Node* const context = Parameter(Descriptor::kContext);
+ // 3. Let C be ? SpeciesConstructor(promise, %Promise%).
+ Label fast_promise_capability(this), slow_constructor(this, Label::kDeferred),
+ slow_promise_capability(this, Label::kDeferred);
+ Node* const native_context = LoadNativeContext(context);
+ Node* const promise_fun =
+ LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ Node* const promise_map = LoadMap(promise);
+ BranchIfPromiseSpeciesLookupChainIntact(
+ native_context, promise_map, &fast_promise_capability, &slow_constructor);
- PerformFulfillClosure(context, value, true);
- Return(UndefinedConstant());
-}
+ BIND(&slow_constructor);
+ Node* const constructor =
+ SpeciesConstructor(native_context, promise, promise_fun);
+ Branch(WordEqual(constructor, promise_fun), &fast_promise_capability,
+ &slow_promise_capability);
-// ES #sec-fulfillpromise
-TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) {
- Node* const promise = Parameter(Descriptor::kPromise);
- Node* const result = Parameter(Descriptor::kValue);
- Node* const context = Parameter(Descriptor::kContext);
+ // 4. Let resultCapability be ? NewPromiseCapability(C).
+ Label perform_promise_then(this);
+ VARIABLE(var_result_promise, MachineRepresentation::kTagged);
+ VARIABLE(var_result_promise_or_capability, MachineRepresentation::kTagged);
- InternalResolvePromise(context, promise, result);
- Return(UndefinedConstant());
+ BIND(&fast_promise_capability);
+ {
+ Node* const result_promise = AllocateAndInitJSPromise(context, promise);
+ var_result_promise_or_capability.Bind(result_promise);
+ var_result_promise.Bind(result_promise);
+ Goto(&perform_promise_then);
+ }
+
+ BIND(&slow_promise_capability);
+ {
+ Node* const debug_event = TrueConstant();
+ Node* const capability = CallBuiltin(Builtins::kNewPromiseCapability,
+ context, constructor, debug_event);
+ var_result_promise.Bind(
+ LoadObjectField(capability, PromiseCapability::kPromiseOffset));
+ var_result_promise_or_capability.Bind(capability);
+ Goto(&perform_promise_then);
+ }
+
+ // 5. Return PerformPromiseThen(promise, onFulfilled, onRejected,
+ // resultCapability).
+ BIND(&perform_promise_then);
+ {
+    // We do some of the PerformPromiseThen work here, namely checking the
+    // handlers and turning non-callable handlers into undefined, since this
+    // is the one and only callsite of PerformPromiseThen that has to do so.
+
+ // 3. If IsCallable(onFulfilled) is false, then
+ // a. Set onFulfilled to undefined.
+ VARIABLE(var_on_fulfilled, MachineRepresentation::kTagged, on_fulfilled);
+ Label if_fulfilled_done(this), if_fulfilled_notcallable(this);
+ GotoIf(TaggedIsSmi(on_fulfilled), &if_fulfilled_notcallable);
+ Branch(IsCallable(on_fulfilled), &if_fulfilled_done,
+ &if_fulfilled_notcallable);
+ BIND(&if_fulfilled_notcallable);
+ var_on_fulfilled.Bind(UndefinedConstant());
+ Goto(&if_fulfilled_done);
+ BIND(&if_fulfilled_done);
+
+ // 4. If IsCallable(onRejected) is false, then
+ // a. Set onRejected to undefined.
+ VARIABLE(var_on_rejected, MachineRepresentation::kTagged, on_rejected);
+ Label if_rejected_done(this), if_rejected_notcallable(this);
+ GotoIf(TaggedIsSmi(on_rejected), &if_rejected_notcallable);
+ Branch(IsCallable(on_rejected), &if_rejected_done,
+ &if_rejected_notcallable);
+ BIND(&if_rejected_notcallable);
+ var_on_rejected.Bind(UndefinedConstant());
+ Goto(&if_rejected_done);
+ BIND(&if_rejected_done);
+
+ PerformPromiseThen(context, promise, var_on_fulfilled.value(),
+ var_on_rejected.value(),
+ var_result_promise_or_capability.value());
+ Return(var_result_promise.value());
+ }
}
-TF_BUILTIN(PromiseHandleReject, PromiseBuiltinsAssembler) {
- Node* const promise = Parameter(Descriptor::kPromise);
- Node* const on_reject = Parameter(Descriptor::kOnReject);
- Node* const exception = Parameter(Descriptor::kException);
+// ES#sec-promise.prototype.catch
+// Promise.prototype.catch ( onRejected )
+TF_BUILTIN(PromisePrototypeCatch, PromiseBuiltinsAssembler) {
+ // 1. Let promise be the this value.
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const on_fulfilled = UndefinedConstant();
+ Node* const on_rejected = Parameter(Descriptor::kOnRejected);
Node* const context = Parameter(Descriptor::kContext);
- VARIABLE(var_unused, MachineRepresentation::kTagged);
+ // 2. Return ? Invoke(promise, "then", « undefined, onRejected »).
+ Node* const native_context = LoadNativeContext(context);
+ Return(InvokeThen(native_context, receiver, on_fulfilled, on_rejected));
+}
- Label if_internalhandler(this), if_customhandler(this, Label::kDeferred);
- Branch(IsUndefined(on_reject), &if_internalhandler, &if_customhandler);
+// ES #sec-promiseresolvethenablejob
+TF_BUILTIN(PromiseResolveThenableJob, PromiseBuiltinsAssembler) {
+ Node* const native_context = Parameter(Descriptor::kContext);
+ Node* const promise_to_resolve = Parameter(Descriptor::kPromiseToResolve);
+ Node* const thenable = Parameter(Descriptor::kThenable);
+ Node* const then = Parameter(Descriptor::kThen);
+
+ CSA_ASSERT(this, TaggedIsNotSmi(thenable));
+ CSA_ASSERT(this, IsJSReceiver(thenable));
+ CSA_ASSERT(this, IsJSPromise(promise_to_resolve));
+ CSA_ASSERT(this, IsNativeContext(native_context));
- BIND(&if_internalhandler);
+ // We can use a simple optimization here if we know that {then} is the initial
+ // Promise.prototype.then method, and {thenable} is a JSPromise whose
+ // @@species lookup chain is intact: We can connect {thenable} and
+ // {promise_to_resolve} directly in that case and avoid the allocation of a
+ // temporary JSPromise and the closures plus context.
+ //
+ // We take the generic (slow-)path if a PromiseHook is enabled or the debugger
+ // is active, to make sure we expose spec compliant behavior.
+ Label if_fast(this), if_slow(this, Label::kDeferred);
+ Node* const promise_then =
+ LoadContextElement(native_context, Context::PROMISE_THEN_INDEX);
+ GotoIfNot(WordEqual(then, promise_then), &if_slow);
+ Node* const thenable_map = LoadMap(thenable);
+ GotoIfNot(IsJSPromiseMap(thenable_map), &if_slow);
+ GotoIf(IsPromiseHookEnabledOrDebugIsActive(), &if_slow);
+ BranchIfPromiseSpeciesLookupChainIntact(native_context, thenable_map,
+ &if_fast, &if_slow);
+
+ BIND(&if_fast);
{
- InternalPromiseReject(context, promise, exception, false);
- Return(UndefinedConstant());
+ // We know that the {thenable} is a JSPromise, which doesn't require
+ // any special treatment and that {then} corresponds to the initial
+ // Promise.prototype.then method. So instead of allocating a temporary
+ // JSPromise to connect the {thenable} with the {promise_to_resolve},
+ // we can directly schedule the {promise_to_resolve} with default
+    // handlers onto the {thenable} promise. This not only saves the
+ // JSPromise allocation, but also avoids the allocation of the two
+ // resolving closures and the shared context.
+ //
+ // What happens normally in this case is
+ //
+ // resolve, reject = CreateResolvingFunctions(promise_to_resolve)
+ // result_capability = NewPromiseCapability(%Promise%)
+ // PerformPromiseThen(thenable, resolve, reject, result_capability)
+ //
+ // which means that PerformPromiseThen will either schedule a new
+ // PromiseReaction with resolve and reject or a PromiseReactionJob
+ // with resolve or reject based on the state of {thenable}. And
+ // resolve or reject will just invoke the default [[Resolve]] or
+ // [[Reject]] functions on the {promise_to_resolve}.
+ //
+ // This is the same as just doing
+ //
+ // PerformPromiseThen(thenable, undefined, undefined, promise_to_resolve)
+ //
+ // which performs exactly the same (observable) steps.
+ TailCallBuiltin(Builtins::kPerformPromiseThen, native_context, thenable,
+ UndefinedConstant(), UndefinedConstant(),
+ promise_to_resolve);
}
- BIND(&if_customhandler);
+ BIND(&if_slow);
{
+ Node* resolve = nullptr;
+ Node* reject = nullptr;
+ std::tie(resolve, reject) = CreatePromiseResolvingFunctions(
+ promise_to_resolve, FalseConstant(), native_context);
+
+ Label if_exception(this, Label::kDeferred);
VARIABLE(var_exception, MachineRepresentation::kTagged, TheHoleConstant());
- Label if_exception(this);
- Node* const ret = CallJS(
- CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
- context, on_reject, UndefinedConstant(), exception);
- GotoIfException(ret, &if_exception, &var_exception);
- Return(UndefinedConstant());
+ Node* const result = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
+ native_context, then, thenable, resolve, reject);
+ GotoIfException(result, &if_exception, &var_exception);
+ Return(result);
+
BIND(&if_exception);
- CallRuntime(Runtime::kReportMessage, context, var_exception.value());
- Return(UndefinedConstant());
+ {
+ // We need to reject the {thenable}.
+ Node* const result = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+          native_context, reject, UndefinedConstant(), var_exception.value());
+ Return(result);
+ }
}
}
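
The fast path in PromiseResolveThenableJob is only taken when every precondition spelled out in the comment above holds; expressed as a single predicate in plain C++ (all parameters are stand-ins for the CSA checks, not real V8 API):

    bool CanShortcutThenableJob(const void* then, const void* initial_then,
                                bool thenable_is_js_promise,
                                bool species_chain_intact,
                                bool hook_or_debugger_active) {
      return then == initial_then &&    // {then} is Promise.prototype.then
             thenable_is_js_promise &&  // IsJSPromiseMap(thenable_map)
             species_chain_intact &&    // @@species protector still valid
             !hook_or_debugger_active;  // otherwise stay fully spec-observable
    }

When the predicate fails, the builtin falls back to the classic resolve/reject closure pair shown in the if_slow block.
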
-TF_BUILTIN(PromiseHandle, PromiseBuiltinsAssembler) {
- Node* const value = Parameter(Descriptor::kValue);
- Node* const handler = Parameter(Descriptor::kHandler);
- Node* const deferred_promise = Parameter(Descriptor::kDeferredPromise);
- Node* const deferred_on_resolve = Parameter(Descriptor::kDeferredOnResolve);
- Node* const deferred_on_reject = Parameter(Descriptor::kDeferredOnReject);
- Node* const context = Parameter(Descriptor::kContext);
- Isolate* isolate = this->isolate();
-
- VARIABLE(var_reason, MachineRepresentation::kTagged);
+// ES #sec-promisereactionjob
+void PromiseBuiltinsAssembler::PromiseReactionJob(Node* context, Node* argument,
+ Node* handler, Node* payload,
+ PromiseReaction::Type type) {
+ CSA_ASSERT(this, TaggedIsNotSmi(handler));
+ CSA_ASSERT(this, Word32Or(IsCallable(handler),
+ Word32Or(IsCode(handler), IsUndefined(handler))));
+ CSA_ASSERT(this, TaggedIsNotSmi(payload));
- Node* const is_debug_active = IsDebugActive();
- Label run_handler(this), if_rejectpromise(this), promisehook_before(this),
- promisehook_after(this), debug_pop(this);
+ VARIABLE(var_handler_result, MachineRepresentation::kTagged, argument);
+ Label if_handler_callable(this), if_fulfill(this), if_reject(this),
+ if_code_handler(this);
- GotoIfNot(is_debug_active, &promisehook_before);
- CallRuntime(Runtime::kDebugPushPromise, context, deferred_promise);
- Goto(&promisehook_before);
+ GotoIf(IsUndefined(handler),
+ type == PromiseReaction::kFulfill ? &if_fulfill : &if_reject);
+ Branch(IsCode(handler), &if_code_handler, &if_handler_callable);
- BIND(&promisehook_before);
+ BIND(&if_code_handler);
{
- GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &run_handler);
- CallRuntime(Runtime::kPromiseHookBefore, context, deferred_promise);
- Goto(&run_handler);
+ // The {handler} is a Code object that knows how to deal with
+ // the {payload} and the {argument}.
+ PromiseReactionHandlerDescriptor descriptor(isolate());
+ TailCallStub(descriptor, handler, context, argument, payload);
}
- BIND(&run_handler);
+ BIND(&if_handler_callable);
{
- Label if_defaulthandler(this), if_callablehandler(this),
- if_internalhandler(this), if_customhandler(this, Label::kDeferred);
- VARIABLE(var_result, MachineRepresentation::kTagged);
+ Node* const result = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ context, handler, UndefinedConstant(), argument);
+ GotoIfException(result, &if_reject, &var_handler_result);
+ var_handler_result.Bind(result);
+ Goto(&if_fulfill);
+ }
- Branch(IsSymbol(handler), &if_defaulthandler, &if_callablehandler);
+ BIND(&if_fulfill);
+ {
+ Label if_promise(this), if_promise_capability(this, Label::kDeferred);
+ Node* const value = var_handler_result.value();
+ Branch(IsPromiseCapability(payload), &if_promise_capability, &if_promise);
- BIND(&if_defaulthandler);
+ BIND(&if_promise);
{
- Label if_resolve(this), if_reject(this);
- Branch(IsPromiseDefaultResolveHandlerSymbol(handler), &if_resolve,
- &if_reject);
-
- BIND(&if_resolve);
- {
- var_result.Bind(value);
- Branch(IsUndefined(deferred_on_resolve), &if_internalhandler,
- &if_customhandler);
- }
-
- BIND(&if_reject);
- {
- var_reason.Bind(value);
- Goto(&if_rejectpromise);
- }
+ // For fast native promises we can skip the indirection
+ // via the promiseCapability.[[Resolve]] function and
+ // run the resolve logic directly from here.
+ TailCallBuiltin(Builtins::kResolvePromise, context, payload, value);
}
- BIND(&if_callablehandler);
+ BIND(&if_promise_capability);
{
+ // In the general case we need to call the (user provided)
+ // promiseCapability.[[Resolve]] function.
+ Node* const resolve =
+ LoadObjectField(payload, PromiseCapability::kResolveOffset);
Node* const result = CallJS(
- CodeFactory::Call(isolate, ConvertReceiverMode::kNullOrUndefined),
- context, handler, UndefinedConstant(), value);
- var_result.Bind(result);
- GotoIfException(result, &if_rejectpromise, &var_reason);
- Branch(IsUndefined(deferred_on_resolve), &if_internalhandler,
- &if_customhandler);
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ context, resolve, UndefinedConstant(), value);
+ GotoIfException(result, &if_reject, &var_handler_result);
+ Return(result);
}
+ }
- BIND(&if_internalhandler);
- InternalResolvePromise(context, deferred_promise, var_result.value());
- Goto(&promisehook_after);
+ BIND(&if_reject);
+ if (type == PromiseReaction::kReject) {
+ Label if_promise(this), if_promise_capability(this, Label::kDeferred);
+ Node* const reason = var_handler_result.value();
+ Branch(IsPromiseCapability(payload), &if_promise_capability, &if_promise);
- BIND(&if_customhandler);
+ BIND(&if_promise);
{
- Node* const maybe_exception = CallJS(
- CodeFactory::Call(isolate, ConvertReceiverMode::kNullOrUndefined),
- context, deferred_on_resolve, UndefinedConstant(),
- var_result.value());
- GotoIfException(maybe_exception, &if_rejectpromise, &var_reason);
- Goto(&promisehook_after);
+ // For fast native promises we can skip the indirection
+ // via the promiseCapability.[[Reject]] function and
+      // run the reject logic directly from here.
+ TailCallBuiltin(Builtins::kRejectPromise, context, payload, reason,
+ FalseConstant());
}
- }
-
- BIND(&if_rejectpromise);
- {
- CallBuiltin(Builtins::kPromiseHandleReject, context, deferred_promise,
- deferred_on_reject, var_reason.value());
- Goto(&promisehook_after);
- }
-
- BIND(&promisehook_after);
- {
- GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &debug_pop);
- CallRuntime(Runtime::kPromiseHookAfter, context, deferred_promise);
- Goto(&debug_pop);
- }
-
- BIND(&debug_pop);
- {
- Label out(this);
- GotoIfNot(is_debug_active, &out);
- CallRuntime(Runtime::kDebugPopPromise, context);
- Goto(&out);
+ BIND(&if_promise_capability);
+ {
+ // In the general case we need to call the (user provided)
+ // promiseCapability.[[Reject]] function.
+ Label if_exception(this, Label::kDeferred);
+ VARIABLE(var_exception, MachineRepresentation::kTagged,
+ TheHoleConstant());
+ Node* const reject =
+ LoadObjectField(payload, PromiseCapability::kRejectOffset);
+ Node* const result = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ context, reject, UndefinedConstant(), reason);
+ GotoIfException(result, &if_exception, &var_exception);
+ Return(result);
- BIND(&out);
- Return(UndefinedConstant());
+ // Swallow the exception here.
+ BIND(&if_exception);
+ TailCallRuntime(Runtime::kReportMessage, context, var_exception.value());
+ }
+ } else {
+    // We have to call out to the dedicated PromiseRejectReactionJob builtin
+    // here, instead of just doing the work inline, as otherwise the catch
+    // predictions in the debugger will be wrong; the debugger merely walks
+    // the stack and checks for certain builtins.
+ TailCallBuiltin(Builtins::kPromiseRejectReactionJob, context,
+ var_handler_result.value(), UndefinedConstant(), payload);
}
}
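
PromiseReactionJob therefore ends with a two-way dispatch on {payload}: a plain JSPromise is settled directly via the ResolvePromise/RejectPromise builtins, while a PromiseCapability goes through the user-supplied [[Resolve]]/[[Reject]] functions. The shape of that dispatch, in a deliberately simplified C++ sketch (the types and callbacks are assumptions, not V8 classes):

    #include <functional>
    #include <variant>

    struct JSPromiseLike {};  // settled directly by the builtins
    struct CapabilityLike {
      std::function<void(int)> resolve;  // promiseCapability.[[Resolve]]
      std::function<void(int)> reject;   // promiseCapability.[[Reject]]
    };

    using Payload = std::variant<JSPromiseLike*, CapabilityLike*>;

    void FulfillPayload(Payload payload, int value,
                        const std::function<void(JSPromiseLike*, int)>&
                            resolve_promise_builtin) {
      if (auto* slot = std::get_if<JSPromiseLike*>(&payload)) {
        resolve_promise_builtin(*slot, value);  // fast path: kResolvePromise
      } else {
        std::get<CapabilityLike*>(payload)->resolve(value);  // generic path
      }
    }
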
-TF_BUILTIN(PromiseHandleJS, PromiseBuiltinsAssembler) {
+// ES #sec-promisereactionjob
+TF_BUILTIN(PromiseFulfillReactionJob, PromiseBuiltinsAssembler) {
+ Node* const context = Parameter(Descriptor::kContext);
Node* const value = Parameter(Descriptor::kValue);
Node* const handler = Parameter(Descriptor::kHandler);
- Node* const deferred_promise = Parameter(Descriptor::kDeferredPromise);
- Node* const deferred_on_resolve = Parameter(Descriptor::kDeferredOnResolve);
- Node* const deferred_on_reject = Parameter(Descriptor::kDeferredOnReject);
- Node* const context = Parameter(Descriptor::kContext);
+ Node* const payload = Parameter(Descriptor::kPayload);
- Node* const result =
- CallBuiltin(Builtins::kPromiseHandle, context, value, handler,
- deferred_promise, deferred_on_resolve, deferred_on_reject);
- Return(result);
+ PromiseReactionJob(context, value, handler, payload,
+ PromiseReaction::kFulfill);
}
-// ES#sec-promise.prototype.catch
-// Promise.prototype.catch ( onRejected )
-TF_BUILTIN(PromisePrototypeCatch, PromiseBuiltinsAssembler) {
- // 1. Let promise be the this value.
- Node* const promise = Parameter(Descriptor::kReceiver);
- Node* const on_resolve = UndefinedConstant();
- Node* const on_reject = Parameter(Descriptor::kOnRejected);
+// ES #sec-promisereactionjob
+TF_BUILTIN(PromiseRejectReactionJob, PromiseBuiltinsAssembler) {
Node* const context = Parameter(Descriptor::kContext);
+ Node* const reason = Parameter(Descriptor::kReason);
+ Node* const handler = Parameter(Descriptor::kHandler);
+ Node* const payload = Parameter(Descriptor::kPayload);
- Label if_internalthen(this), if_customthen(this, Label::kDeferred);
- GotoIf(TaggedIsSmi(promise), &if_customthen);
- BranchIfFastPath(context, promise, &if_internalthen, &if_customthen);
-
- BIND(&if_internalthen);
- {
- Node* const result =
- InternalPromiseThen(context, promise, on_resolve, on_reject);
- Return(result);
- }
-
- BIND(&if_customthen);
- {
- Node* const then =
- GetProperty(context, promise, isolate()->factory()->then_string());
- Node* const result = CallJS(
- CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
- context, then, promise, on_resolve, on_reject);
- Return(result);
- }
+ PromiseReactionJob(context, reason, handler, payload,
+ PromiseReaction::kReject);
}
-TF_BUILTIN(PromiseResolveWrapper, PromiseBuiltinsAssembler) {
+TF_BUILTIN(PromiseResolveTrampoline, PromiseBuiltinsAssembler) {
// 1. Let C be the this value.
Node* receiver = Parameter(Descriptor::kReceiver);
Node* value = Parameter(Descriptor::kValue);
@@ -1331,51 +1152,49 @@ TF_BUILTIN(PromiseResolve, PromiseBuiltinsAssembler) {
Node* constructor = Parameter(Descriptor::kConstructor);
Node* value = Parameter(Descriptor::kValue);
Node* context = Parameter(Descriptor::kContext);
- Isolate* isolate = this->isolate();
+
+ CSA_ASSERT(this, IsJSReceiver(constructor));
Node* const native_context = LoadNativeContext(context);
Node* const promise_fun =
LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
- Label if_value_is_native_promise(this),
- if_value_or_constructor_are_not_native_promise(this),
- if_need_to_allocate(this);
+ Label if_slow_constructor(this, Label::kDeferred), if_need_to_allocate(this);
+ // Check if {value} is a JSPromise.
GotoIf(TaggedIsSmi(value), &if_need_to_allocate);
-
- // This shortcircuits the constructor lookups.
- GotoIfNot(HasInstanceType(value, JS_PROMISE_TYPE), &if_need_to_allocate);
-
- // This adds a fast path as non-subclassed native promises don't have
- // an observable constructor lookup.
- BranchIfFastPath(native_context, promise_fun, value,
- &if_value_is_native_promise,
- &if_value_or_constructor_are_not_native_promise);
-
- BIND(&if_value_is_native_promise);
- {
- GotoIfNot(WordEqual(promise_fun, constructor),
- &if_value_or_constructor_are_not_native_promise);
- Return(value);
- }
+ Node* const value_map = LoadMap(value);
+ GotoIfNot(IsJSPromiseMap(value_map), &if_need_to_allocate);
+
+  // We can skip the "constructor" lookup on {value} if its [[Prototype]]
+ // is the (initial) Promise.prototype and the @@species protector is
+ // intact, as that guards the lookup path for "constructor" on
+ // JSPromise instances which have the (initial) Promise.prototype.
+ Node* const promise_prototype =
+      LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_INDEX);
+ GotoIfNot(WordEqual(LoadMapPrototype(value_map), promise_prototype),
+ &if_slow_constructor);
+ GotoIf(IsSpeciesProtectorCellInvalid(), &if_slow_constructor);
+
+ // If the {constructor} is the Promise function, we just immediately
+ // return the {value} here and don't bother wrapping it into a
+ // native Promise.
+ GotoIfNot(WordEqual(promise_fun, constructor), &if_slow_constructor);
+ Return(value);
// At this point, value or/and constructor are not native promises, but
// they could be of the same subclass.
- BIND(&if_value_or_constructor_are_not_native_promise);
+ BIND(&if_slow_constructor);
{
- Label if_return(this);
- Node* const xConstructor =
- GetProperty(context, value, isolate->factory()->constructor_string());
- BranchIfSameValue(xConstructor, constructor, &if_return,
- &if_need_to_allocate);
-
- BIND(&if_return);
+ Node* const value_constructor =
+ GetProperty(context, value, isolate()->factory()->constructor_string());
+ GotoIfNot(WordEqual(value_constructor, constructor), &if_need_to_allocate);
Return(value);
}
BIND(&if_need_to_allocate);
{
- Label if_nativepromise(this), if_notnativepromise(this);
+ Label if_nativepromise(this), if_notnativepromise(this, Label::kDeferred);
Branch(WordEqual(promise_fun, constructor), &if_nativepromise,
&if_notnativepromise);
@@ -1384,18 +1203,21 @@ TF_BUILTIN(PromiseResolve, PromiseBuiltinsAssembler) {
BIND(&if_nativepromise);
{
Node* const result = AllocateAndInitJSPromise(context);
- InternalResolvePromise(context, result, value);
+ CallBuiltin(Builtins::kResolvePromise, context, result, value);
Return(result);
}
BIND(&if_notnativepromise);
{
- Node* const capability = NewPromiseCapability(context, constructor);
+ Node* const debug_event = TrueConstant();
+ Node* const capability = CallBuiltin(Builtins::kNewPromiseCapability,
+ context, constructor, debug_event);
Node* const resolve =
LoadObjectField(capability, PromiseCapability::kResolveOffset);
- CallJS(CodeFactory::Call(isolate, ConvertReceiverMode::kNullOrUndefined),
- context, resolve, UndefinedConstant(), value);
+ CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ context, resolve, UndefinedConstant(), value);
Node* const result =
LoadObjectField(capability, PromiseCapability::kPromiseOffset);
@@ -1429,17 +1251,6 @@ TF_BUILTIN(PromiseGetCapabilitiesExecutor, PromiseBuiltinsAssembler) {
ThrowTypeError(context, MessageTemplate::kPromiseExecutorAlreadyInvoked);
}
-// ES6 #sec-newpromisecapability
-TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) {
- Node* constructor = Parameter(Descriptor::kConstructor);
- Node* debug_event = Parameter(Descriptor::kDebugEvent);
- Node* context = Parameter(Descriptor::kContext);
-
- CSA_ASSERT_JS_ARGC_EQ(this, 2);
-
- Return(NewPromiseCapability(context, constructor, debug_event));
-}
-
TF_BUILTIN(PromiseReject, PromiseBuiltinsAssembler) {
// 1. Let C be the this value.
Node* const receiver = Parameter(Descriptor::kReceiver);
@@ -1470,7 +1281,9 @@ TF_BUILTIN(PromiseReject, PromiseBuiltinsAssembler) {
BIND(&if_custompromise);
{
// 3. Let promiseCapability be ? NewPromiseCapability(C).
- Node* const capability = NewPromiseCapability(context, receiver);
+ Node* const debug_event = TrueConstant();
+ Node* const capability = CallBuiltin(Builtins::kNewPromiseCapability,
+ context, receiver, debug_event);
// 4. Perform ? Call(promiseCapability.[[Reject]], undefined, « r »).
Node* const reject =
@@ -1485,16 +1298,6 @@ TF_BUILTIN(PromiseReject, PromiseBuiltinsAssembler) {
}
}
-TF_BUILTIN(InternalPromiseReject, PromiseBuiltinsAssembler) {
- Node* const promise = Parameter(Descriptor::kPromise);
- Node* const reason = Parameter(Descriptor::kReason);
- Node* const debug_event = Parameter(Descriptor::kDebugEvent);
- Node* const context = Parameter(Descriptor::kContext);
-
- InternalPromiseReject(context, promise, reason, debug_event);
- Return(UndefinedConstant());
-}
-
std::pair<Node*, Node*> PromiseBuiltinsAssembler::CreatePromiseFinallyFunctions(
Node* on_finally, Node* constructor, Node* native_context) {
Node* const promise_context =
@@ -1565,16 +1368,11 @@ TF_BUILTIN(PromiseThenFinally, PromiseBuiltinsAssembler) {
CallBuiltin(Builtins::kPromiseResolve, context, constructor, result);
// 7. Let valueThunk be equivalent to a function that returns value.
- Node* native_context = LoadNativeContext(context);
+ Node* const native_context = LoadNativeContext(context);
Node* const value_thunk = CreateValueThunkFunction(value, native_context);
// 8. Return ? Invoke(promise, "then", « valueThunk »).
- Node* const promise_then =
- GetProperty(context, promise, factory()->then_string());
- Node* const result_promise = CallJS(
- CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
- context, promise_then, promise, value_thunk);
- Return(result_promise);
+ Return(InvokeThen(native_context, promise, value_thunk));
}
TF_BUILTIN(PromiseThrowerFinally, PromiseBuiltinsAssembler) {
@@ -1627,35 +1425,44 @@ TF_BUILTIN(PromiseCatchFinally, PromiseBuiltinsAssembler) {
CallBuiltin(Builtins::kPromiseResolve, context, constructor, result);
// 7. Let thrower be equivalent to a function that throws reason.
- Node* native_context = LoadNativeContext(context);
+ Node* const native_context = LoadNativeContext(context);
Node* const thrower = CreateThrowerFunction(reason, native_context);
// 8. Return ? Invoke(promise, "then", « thrower »).
- Node* const promise_then =
- GetProperty(context, promise, factory()->then_string());
- Node* const result_promise = CallJS(
- CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
- context, promise_then, promise, thrower);
- Return(result_promise);
+ Return(InvokeThen(native_context, promise, thrower));
}
TF_BUILTIN(PromisePrototypeFinally, PromiseBuiltinsAssembler) {
CSA_ASSERT_JS_ARGC_EQ(this, 1);
// 1. Let promise be the this value.
- Node* const promise = Parameter(Descriptor::kReceiver);
+ Node* const receiver = Parameter(Descriptor::kReceiver);
Node* const on_finally = Parameter(Descriptor::kOnFinally);
Node* const context = Parameter(Descriptor::kContext);
// 2. If Type(promise) is not Object, throw a TypeError exception.
- ThrowIfNotJSReceiver(context, promise, MessageTemplate::kCalledOnNonObject,
+ ThrowIfNotJSReceiver(context, receiver, MessageTemplate::kCalledOnNonObject,
"Promise.prototype.finally");
// 3. Let C be ? SpeciesConstructor(promise, %Promise%).
Node* const native_context = LoadNativeContext(context);
Node* const promise_fun =
LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
- Node* const constructor = SpeciesConstructor(context, promise, promise_fun);
+ VARIABLE(var_constructor, MachineRepresentation::kTagged, promise_fun);
+ Label slow_constructor(this, Label::kDeferred), done_constructor(this);
+ Node* const receiver_map = LoadMap(receiver);
+ GotoIfNot(IsJSPromiseMap(receiver_map), &slow_constructor);
+ BranchIfPromiseSpeciesLookupChainIntact(native_context, receiver_map,
+ &done_constructor, &slow_constructor);
+ BIND(&slow_constructor);
+ {
+ Node* const constructor =
+ SpeciesConstructor(context, receiver, promise_fun);
+ var_constructor.Bind(constructor);
+ Goto(&done_constructor);
+ }
+ BIND(&done_constructor);
+ Node* const constructor = var_constructor.value();
// 4. Assert: IsConstructor(C) is true.
CSA_ASSERT(this, IsConstructor(constructor));
@@ -1697,50 +1504,172 @@ TF_BUILTIN(PromisePrototypeFinally, PromiseBuiltinsAssembler) {
// 7. Return ? Invoke(promise, "then", « thenFinally, catchFinally »).
BIND(&perform_finally);
- Node* const promise_then =
- GetProperty(context, promise, factory()->then_string());
- Node* const result_promise = CallJS(
- CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
- context, promise_then, promise, var_then_finally.value(),
- var_catch_finally.value());
- Return(result_promise);
+ Return(InvokeThen(native_context, receiver, var_then_finally.value(),
+ var_catch_finally.value()));
}
-TF_BUILTIN(ResolveNativePromise, PromiseBuiltinsAssembler) {
+// ES #sec-fulfillpromise
+TF_BUILTIN(FulfillPromise, PromiseBuiltinsAssembler) {
Node* const promise = Parameter(Descriptor::kPromise);
Node* const value = Parameter(Descriptor::kValue);
Node* const context = Parameter(Descriptor::kContext);
- CSA_ASSERT(this, HasInstanceType(promise, JS_PROMISE_TYPE));
- InternalResolvePromise(context, promise, value);
- Return(UndefinedConstant());
+ CSA_ASSERT(this, TaggedIsNotSmi(promise));
+ CSA_ASSERT(this, IsJSPromise(promise));
+
+ // 2. Let reactions be promise.[[PromiseFulfillReactions]].
+ Node* const reactions =
+ LoadObjectField(promise, JSPromise::kReactionsOrResultOffset);
+
+ // 3. Set promise.[[PromiseResult]] to value.
+ // 4. Set promise.[[PromiseFulfillReactions]] to undefined.
+ // 5. Set promise.[[PromiseRejectReactions]] to undefined.
+ StoreObjectField(promise, JSPromise::kReactionsOrResultOffset, value);
+
+ // 6. Set promise.[[PromiseState]] to "fulfilled".
+ PromiseSetStatus(promise, Promise::kFulfilled);
+
+ // 7. Return TriggerPromiseReactions(reactions, value).
+ Return(TriggerPromiseReactions(context, reactions, value,
+ PromiseReaction::kFulfill));
}
-TF_BUILTIN(RejectNativePromise, PromiseBuiltinsAssembler) {
+// ES #sec-rejectpromise
+TF_BUILTIN(RejectPromise, PromiseBuiltinsAssembler) {
Node* const promise = Parameter(Descriptor::kPromise);
- Node* const value = Parameter(Descriptor::kValue);
+ Node* const reason = Parameter(Descriptor::kReason);
Node* const debug_event = Parameter(Descriptor::kDebugEvent);
Node* const context = Parameter(Descriptor::kContext);
- CSA_ASSERT(this, HasInstanceType(promise, JS_PROMISE_TYPE));
+ CSA_ASSERT(this, TaggedIsNotSmi(promise));
+ CSA_ASSERT(this, IsJSPromise(promise));
CSA_ASSERT(this, IsBoolean(debug_event));
- InternalPromiseReject(context, promise, value, debug_event);
- Return(UndefinedConstant());
+ Label if_runtime(this, Label::kDeferred);
+
+ // If promise hook is enabled or the debugger is active, let
+ // the runtime handle this operation, which greatly reduces
+  // the complexity here and also avoids a couple of back-and-forth
+  // trips between JavaScript and C++ land.
+ GotoIf(IsPromiseHookEnabledOrDebugIsActive(), &if_runtime);
+
+ // 7. If promise.[[PromiseIsHandled]] is false, perform
+ // HostPromiseRejectionTracker(promise, "reject").
+  // We don't try to handle rejecting a {promise} that has no handler
+  // here; we let the C++ code take care of that completely.
+ GotoIfNot(PromiseHasHandler(promise), &if_runtime);
+
+ // 2. Let reactions be promise.[[PromiseRejectReactions]].
+ Node* reactions =
+ LoadObjectField(promise, JSPromise::kReactionsOrResultOffset);
+
+ // 3. Set promise.[[PromiseResult]] to reason.
+ // 4. Set promise.[[PromiseFulfillReactions]] to undefined.
+ // 5. Set promise.[[PromiseRejectReactions]] to undefined.
+ StoreObjectField(promise, JSPromise::kReactionsOrResultOffset, reason);
+
+ // 6. Set promise.[[PromiseState]] to "rejected".
+ PromiseSetStatus(promise, Promise::kRejected);
+
+ // 7. Return TriggerPromiseReactions(reactions, reason).
+ Return(TriggerPromiseReactions(context, reactions, reason,
+ PromiseReaction::kReject));
+
+ BIND(&if_runtime);
+ TailCallRuntime(Runtime::kRejectPromise, context, promise, reason,
+ debug_event);
}
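
FulfillPromise and RejectPromise above share the same shape: the single kReactionsOrResultOffset field holds the reaction list while the promise is pending and is overwritten with the result when it settles. A hypothetical C++ analogue of that field reuse:

    #include <cstdint>

    enum class State : uint8_t { kPending, kFulfilled, kRejected };

    struct PromiseLike {
      State state = State::kPending;
      void* reactions_or_result = nullptr;  // reactions while pending,
                                            // result once settled
    };

    // Returns the pending reactions so the caller can trigger them with
    // {value}, mirroring TriggerPromiseReactions.
    void* Settle(PromiseLike* promise, void* value, State new_state) {
      void* reactions = promise->reactions_or_result;
      promise->reactions_or_result = value;
      promise->state = new_state;
      return reactions;
    }
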
-TF_BUILTIN(PerformNativePromiseThen, PromiseBuiltinsAssembler) {
+// ES #sec-promise-resolve-functions
+TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) {
Node* const promise = Parameter(Descriptor::kPromise);
- Node* const resolve_reaction = Parameter(Descriptor::kResolveReaction);
- Node* const reject_reaction = Parameter(Descriptor::kRejectReaction);
- Node* const result_promise = Parameter(Descriptor::kResultPromise);
+ Node* const resolution = Parameter(Descriptor::kResolution);
Node* const context = Parameter(Descriptor::kContext);
- CSA_ASSERT(this, HasInstanceType(result_promise, JS_PROMISE_TYPE));
+ CSA_ASSERT(this, TaggedIsNotSmi(promise));
+ CSA_ASSERT(this, IsJSPromise(promise));
- InternalPerformPromiseThen(context, promise, resolve_reaction,
- reject_reaction, result_promise,
- UndefinedConstant(), UndefinedConstant());
- Return(result_promise);
+ Label do_enqueue(this), if_fulfill(this), if_reject(this, Label::kDeferred),
+ if_runtime(this, Label::kDeferred);
+ VARIABLE(var_reason, MachineRepresentation::kTagged);
+ VARIABLE(var_then, MachineRepresentation::kTagged);
+
+ // If promise hook is enabled or the debugger is active, let
+ // the runtime handle this operation, which greatly reduces
+  // the complexity here and also avoids a couple of back-and-forth
+  // trips between JavaScript and C++ land.
+ GotoIf(IsPromiseHookEnabledOrDebugIsActive(), &if_runtime);
+
+ // 6. If SameValue(resolution, promise) is true, then
+ // We can use pointer comparison here, since the {promise} is guaranteed
+ // to be a JSPromise inside this function and thus is reference comparable.
+ GotoIf(WordEqual(promise, resolution), &if_runtime);
+
+ // 7. If Type(resolution) is not Object, then
+ GotoIf(TaggedIsSmi(resolution), &if_fulfill);
+ Node* const result_map = LoadMap(resolution);
+ GotoIfNot(IsJSReceiverMap(result_map), &if_fulfill);
+
+ // We can skip the "then" lookup on {resolution} if its [[Prototype]]
+ // is the (initial) Promise.prototype and the Promise#then protector
+ // is intact, as that guards the lookup path for the "then" property
+ // on JSPromise instances which have the (initial) %PromisePrototype%.
+ Label if_fast(this), if_slow(this, Label::kDeferred);
+ Node* const native_context = LoadNativeContext(context);
+ BranchIfPromiseThenLookupChainIntact(native_context, result_map, &if_fast,
+ &if_slow);
+
+  // {resolution} is a native promise with the initial Promise.prototype,
+  // so its "then" is known to be the initial Promise.prototype.then and
+  // the property lookup can be skipped.
+ BIND(&if_fast);
+ {
+ Node* const then =
+ LoadContextElement(native_context, Context::PROMISE_THEN_INDEX);
+ var_then.Bind(then);
+ Goto(&do_enqueue);
+ }
+
+ BIND(&if_slow);
+ {
+ // 8. Let then be Get(resolution, "then").
+ Node* const then =
+ GetProperty(context, resolution, isolate()->factory()->then_string());
+
+ // 9. If then is an abrupt completion, then
+ GotoIfException(then, &if_reject, &var_reason);
+
+ // 11. If IsCallable(thenAction) is false, then
+ GotoIf(TaggedIsSmi(then), &if_fulfill);
+ Node* const then_map = LoadMap(then);
+ GotoIfNot(IsCallableMap(then_map), &if_fulfill);
+ var_then.Bind(then);
+ Goto(&do_enqueue);
+ }
+
+ BIND(&do_enqueue);
+ {
+ // 12. Perform EnqueueJob("PromiseJobs", PromiseResolveThenableJob,
+ // «promise, resolution, thenAction»).
+ Node* const task = AllocatePromiseResolveThenableJobTask(
+ promise, var_then.value(), resolution, native_context);
+ TailCallBuiltin(Builtins::kEnqueueMicrotask, native_context, task);
+ }
+
+ BIND(&if_fulfill);
+ {
+ // 7.b Return FulfillPromise(promise, resolution).
+ TailCallBuiltin(Builtins::kFulfillPromise, context, promise, resolution);
+ }
+
+ BIND(&if_runtime);
+ Return(CallRuntime(Runtime::kResolvePromise, context, promise, resolution));
+
+ BIND(&if_reject);
+ {
+ // 9.a Return RejectPromise(promise, then.[[Value]]).
+ TailCallBuiltin(Builtins::kRejectPromise, context, promise,
+ var_reason.value(), FalseConstant());
+ }
}
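
Stripped of the CSA plumbing, the ResolvePromise builtin above is a small decision tree: bail to the runtime for hooks/debugger or self-resolution, fulfill directly for non-objects and objects without a callable "then", and otherwise enqueue a PromiseResolveThenableJob. A compact C++ sketch of that control flow, with placeholder inputs (the abrupt-completion path, where the "then" getter throws and the promise is rejected, is left out for brevity):

    enum class ResolveAction {
      kRuntimeFallback,      // hooks/debugger active or self-resolution
      kFulfill,              // FulfillPromise(promise, resolution)
      kEnqueueThenableJob    // schedule PromiseResolveThenableJob
    };

    ResolveAction ClassifyResolution(const void* promise, const void* resolution,
                                     bool hooks_or_debugger_active,
                                     bool resolution_is_object,
                                     bool then_is_callable) {
      if (hooks_or_debugger_active) return ResolveAction::kRuntimeFallback;
      if (promise == resolution) return ResolveAction::kRuntimeFallback;
      if (!resolution_is_object) return ResolveAction::kFulfill;
      if (!then_is_callable) return ResolveAction::kFulfill;
      return ResolveAction::kEnqueueThenableJob;
    }
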
Node* PromiseBuiltinsAssembler::PerformPromiseAll(
@@ -1802,9 +1731,6 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
Node* const resolve_context =
CreatePromiseContext(native_context, kPromiseAllResolveElementLength);
StoreContextElementNoWriteBarrier(
- resolve_context, kPromiseAllResolveElementAlreadyVisitedSlot,
- SmiConstant(0));
- StoreContextElementNoWriteBarrier(
resolve_context, kPromiseAllResolveElementIndexSlot, var_index.value());
StoreContextElementNoWriteBarrier(
resolve_context, kPromiseAllResolveElementRemainingElementsSlot,
@@ -1944,7 +1870,8 @@ TF_BUILTIN(PromiseAll, PromiseBuiltinsAssembler) {
// Don't fire debugEvent so that forwarding the rejection through all does not
// trigger redundant ExceptionEvents
Node* const debug_event = FalseConstant();
- Node* const capability = NewPromiseCapability(context, receiver, debug_event);
+ Node* const capability = CallBuiltin(Builtins::kNewPromiseCapability, context,
+ receiver, debug_event);
VARIABLE(var_exception, MachineRepresentation::kTagged, TheHoleConstant());
Label reject_promise(this, &var_exception, Label::kDeferred);
@@ -1987,19 +1914,16 @@ TF_BUILTIN(PromiseAllResolveElementClosure, PromiseBuiltinsAssembler) {
CSA_ASSERT(this, SmiEqual(LoadFixedArrayBaseLength(context),
SmiConstant(kPromiseAllResolveElementLength)));
- Label already_called(this), resolve_promise(this);
- GotoIf(SmiEqual(LoadContextElement(
- context, kPromiseAllResolveElementAlreadyVisitedSlot),
- SmiConstant(1)),
- &already_called);
- StoreContextElementNoWriteBarrier(
- context, kPromiseAllResolveElementAlreadyVisitedSlot, SmiConstant(1));
-
Node* const index =
LoadContextElement(context, kPromiseAllResolveElementIndexSlot);
Node* const values_array =
LoadContextElement(context, kPromiseAllResolveElementValuesArraySlot);
+ Label already_called(this, Label::kDeferred), resolve_promise(this);
+ GotoIf(SmiLessThan(index, SmiConstant(Smi::kZero)), &already_called);
+ StoreContextElementNoWriteBarrier(context, kPromiseAllResolveElementIndexSlot,
+ SmiConstant(-1));
+
// Set element in FixedArray
Label runtime_set_element(this), did_set_element(this);
GotoIfNot(TaggedIsPositiveSmi(index), &runtime_set_element);
@@ -2070,7 +1994,8 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
// Don't fire debugEvent so that forwarding the rejection through all does not
// trigger redundant ExceptionEvents
Node* const debug_event = FalseConstant();
- Node* const capability = NewPromiseCapability(context, receiver, debug_event);
+ Node* const capability = CallBuiltin(Builtins::kNewPromiseCapability, context,
+ receiver, debug_event);
Node* const resolve =
LoadObjectField(capability, PromiseCapability::kResolveOffset);
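
The PromiseAll resolve-element change earlier in this file drops the separate "already visited" slot and instead stores -1 into the index slot once the per-element closure has run, so a negative index doubles as the already-called flag. A small C++ analogue of that single-slot claim (names are illustrative):

    struct ResolveElementState {
      int index;  // >= 0: element index; -1: callback already ran
    };

    // Returns true exactly once, mirroring the SmiLessThan(index, 0) guard in
    // PromiseAllResolveElementClosure.
    bool TryClaim(ResolveElementState* state, int* out_index) {
      if (state->index < 0) return false;  // already called
      *out_index = state->index;
      state->index = -1;                   // overwrite the slot with the sentinel
      return true;
    }
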
diff --git a/deps/v8/src/builtins/builtins-promise-gen.h b/deps/v8/src/builtins/builtins-promise-gen.h
index 366c7c22cd..2130101e84 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.h
+++ b/deps/v8/src/builtins/builtins-promise-gen.h
@@ -2,11 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_BUILTINS_BUILTINS_PROMISE_H_
-#define V8_BUILTINS_BUILTINS_PROMISE_H_
+#ifndef V8_BUILTINS_BUILTINS_PROMISE_GEN_H_
+#define V8_BUILTINS_BUILTINS_PROMISE_GEN_H_
#include "src/code-stub-assembler.h"
#include "src/contexts.h"
+#include "src/objects/promise.h"
namespace v8 {
namespace internal {
@@ -29,11 +30,8 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
protected:
enum PromiseAllResolveElementContextSlots {
- // Whether the resolve callback was already called.
- kPromiseAllResolveElementAlreadyVisitedSlot = Context::MIN_CONTEXT_SLOTS,
-
- // Index into the values array
- kPromiseAllResolveElementIndexSlot,
+ // Index into the values array, or -1 if the callback was already called
+ kPromiseAllResolveElementIndexSlot = Context::MIN_CONTEXT_SLOTS,
// Remaining elements count (mutable HeapNumber)
kPromiseAllResolveElementRemainingElementsSlot,
@@ -90,8 +88,16 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
Node* AllocateAndSetJSPromise(Node* context, v8::Promise::PromiseState status,
Node* result);
- Node* AllocatePromiseResolveThenableJobInfo(Node* result, Node* then,
- Node* resolve, Node* reject,
+ Node* AllocatePromiseReaction(Node* next, Node* payload,
+ Node* fulfill_handler, Node* reject_handler);
+
+ Node* AllocatePromiseReactionJobTask(Heap::RootListIndex map_root_index,
+ Node* context, Node* argument,
+ Node* handler, Node* payload);
+ Node* AllocatePromiseReactionJobTask(Node* map, Node* context, Node* argument,
+ Node* handler, Node* payload);
+ Node* AllocatePromiseResolveThenableJobTask(Node* promise_to_resolve,
+ Node* then, Node* thenable,
Node* context);
std::pair<Node*, Node*> CreatePromiseResolvingFunctions(
@@ -105,50 +111,44 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
Node* CreatePromiseGetCapabilitiesExecutorContext(Node* native_context,
Node* promise_capability);
- Node* NewPromiseCapability(Node* context, Node* constructor,
- Node* debug_event = nullptr);
-
protected:
void PromiseInit(Node* promise);
- Node* SpeciesConstructor(Node* context, Node* object,
- Node* default_constructor);
-
void PromiseSetHasHandler(Node* promise);
void PromiseSetHandledHint(Node* promise);
- void AppendPromiseCallback(int offset, compiler::Node* promise,
- compiler::Node* value);
+ void PerformPromiseThen(Node* context, Node* promise, Node* on_fulfilled,
+ Node* on_rejected,
+ Node* result_promise_or_capability);
- Node* InternalPromiseThen(Node* context, Node* promise, Node* on_resolve,
- Node* on_reject);
-
- Node* InternalPerformPromiseThen(Node* context, Node* promise,
- Node* on_resolve, Node* on_reject,
- Node* deferred_promise,
- Node* deferred_on_resolve,
- Node* deferred_on_reject);
+ Node* CreatePromiseContext(Node* native_context, int slots);
- void InternalResolvePromise(Node* context, Node* promise, Node* result);
+ Node* TriggerPromiseReactions(Node* context, Node* promise, Node* result,
+ PromiseReaction::Type type);
- void BranchIfFastPath(Node* context, Node* promise, Label* if_isunmodified,
- Label* if_ismodified);
+  // We can shortcut the SpeciesConstructor on {promise_map} if its
+ // [[Prototype]] is the (initial) Promise.prototype and the @@species
+ // protector is intact, as that guards the lookup path for the "constructor"
+ // property on JSPromise instances which have the %PromisePrototype%.
+ void BranchIfPromiseSpeciesLookupChainIntact(Node* native_context,
+ Node* promise_map,
+ Label* if_fast, Label* if_slow);
- void BranchIfFastPath(Node* native_context, Node* promise_fun, Node* promise,
- Label* if_isunmodified, Label* if_ismodified);
+  // We can skip the "then" lookup on {receiver_map} if its [[Prototype]]
+ // is the (initial) Promise.prototype and the Promise#then() protector
+ // is intact, as that guards the lookup path for the "then" property
+ // on JSPromise instances which have the (initial) %PromisePrototype%.
+ void BranchIfPromiseThenLookupChainIntact(Node* native_context,
+ Node* receiver_map, Label* if_fast,
+ Label* if_slow);
- Node* CreatePromiseContext(Node* native_context, int slots);
- void PromiseFulfill(Node* context, Node* promise, Node* result,
- v8::Promise::PromiseState status);
+ template <typename... TArgs>
+ Node* InvokeThen(Node* native_context, Node* receiver, TArgs... args);
void BranchIfAccessCheckFailed(Node* context, Node* native_context,
Node* promise_constructor, Node* executor,
Label* if_noaccess);
- void InternalPromiseReject(Node* context, Node* promise, Node* value,
- bool debug_event);
- void InternalPromiseReject(Node* context, Node* promise, Node* value,
- Node* debug_event);
std::pair<Node*, Node*> CreatePromiseFinallyFunctions(Node* on_finally,
Node* constructor,
Node* native_context);
@@ -174,9 +174,10 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
const NodeGenerator& handled_by);
Node* PromiseStatus(Node* promise);
- void PerformFulfillClosure(Node* context, Node* value, bool should_resolve);
- private:
+ void PromiseReactionJob(Node* context, Node* argument, Node* handler,
+ Node* payload, PromiseReaction::Type type);
+
Node* IsPromiseStatus(Node* actual, v8::Promise::PromiseState expected);
void PromiseSetStatus(Node* promise, v8::Promise::PromiseState status);
@@ -186,4 +187,4 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
} // namespace internal
} // namespace v8
-#endif // V8_BUILTINS_BUILTINS_PROMISE_H_
+#endif // V8_BUILTINS_BUILTINS_PROMISE_GEN_H_
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index 64e838d53a..fb35f48a15 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -73,22 +73,57 @@ Node* ProxiesCodeStubAssembler::AllocateProxy(Node* target, Node* handler,
Node* ProxiesCodeStubAssembler::AllocateJSArrayForCodeStubArguments(
Node* context, CodeStubArguments& args, Node* argc, ParameterMode mode) {
+ Comment("AllocateJSArrayForCodeStubArguments");
+
+ Label if_empty_array(this), allocate_js_array(this);
+ // Do not use AllocateJSArray since {elements} might end up in LOS.
+ VARIABLE(elements, MachineRepresentation::kTagged);
+
+ TNode<Smi> length = ParameterToTagged(argc, mode);
+ GotoIf(SmiEqual(length, SmiConstant(0)), &if_empty_array);
+ {
+ Label if_large_object(this, Label::kDeferred);
+ Node* allocated_elements = AllocateFixedArray(PACKED_ELEMENTS, argc, mode,
+ kAllowLargeObjectAllocation);
+ elements.Bind(allocated_elements);
+
+ VARIABLE(index, MachineType::PointerRepresentation(),
+ IntPtrConstant(FixedArrayBase::kHeaderSize - kHeapObjectTag));
+ VariableList list({&index}, zone());
+
+ GotoIf(SmiGreaterThan(length, SmiConstant(FixedArray::kMaxRegularLength)),
+ &if_large_object);
+ args.ForEach(list, [=, &index](Node* arg) {
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, allocated_elements,
+ index.value(), arg);
+ Increment(&index, kPointerSize);
+ });
+ Goto(&allocate_js_array);
+
+ BIND(&if_large_object);
+ {
+ args.ForEach(list, [=, &index](Node* arg) {
+ Store(allocated_elements, index.value(), arg);
+ Increment(&index, kPointerSize);
+ });
+ Goto(&allocate_js_array);
+ }
+ }
+
+ BIND(&if_empty_array);
+ {
+ elements.Bind(EmptyFixedArrayConstant());
+ Goto(&allocate_js_array);
+ }
+
+ BIND(&allocate_js_array);
+ // Allocate the result JSArray.
Node* native_context = LoadNativeContext(context);
Node* array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
- Node* argc_smi = ParameterToTagged(argc, mode);
-
- Node* array = AllocateJSArray(PACKED_ELEMENTS, array_map, argc, argc_smi,
- nullptr, mode);
- Node* elements = LoadElements(array);
-
- VARIABLE(index, MachineType::PointerRepresentation(),
- IntPtrConstant(FixedArrayBase::kHeaderSize - kHeapObjectTag));
- VariableList list({&index}, zone());
- args.ForEach(list, [=, &index](Node* arg) {
- StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, index.value(),
- arg);
- Increment(&index, kPointerSize);
- });
+ Node* array = AllocateUninitializedJSArrayWithoutElements(array_map, length);
+ StoreObjectFieldNoWriteBarrier(array, JSObject::kElementsOffset,
+ elements.value());
+
return array;
}
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 4227c628d1..45329eed70 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -7,6 +7,7 @@
#include "src/builtins/builtins-constructor-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
+#include "src/builtins/growable-fixed-array-gen.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
#include "src/counters.h"
@@ -135,10 +136,9 @@ void RegExpBuiltinsAssembler::StoreLastIndex(Node* context, Node* regexp,
Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
Node* const context, Node* const regexp, Node* const match_info,
- Node* const string) {
+ TNode<String> const string) {
CSA_ASSERT(this, IsFixedArrayMap(LoadMap(match_info)));
CSA_ASSERT(this, IsJSRegExp(regexp));
- CSA_ASSERT(this, IsString(string));
Label named_captures(this), out(this);
@@ -152,7 +152,8 @@ Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
// Calculate the substring of the first match before creating the result array
// to avoid an unnecessary write barrier storing the first result.
- Node* const first = SubString(context, string, start, end);
+
+ TNode<String> const first = SubString(string, SmiUntag(start), SmiUntag(end));
Node* const result =
AllocateRegExpResult(context, num_results, start, string);
@@ -188,7 +189,8 @@ Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
Node* const from_cursor_plus1 = IntPtrAdd(from_cursor, IntPtrConstant(1));
Node* const end = LoadFixedArrayElement(match_info, from_cursor_plus1);
- Node* const capture = SubString(context, string, start, end);
+ TNode<String> const capture =
+ SubString(string, SmiUntag(start), SmiUntag(end));
StoreFixedArrayElement(result_elements, to_cursor, capture);
Goto(&next_iter);
@@ -441,18 +443,11 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
// contains the uninitialized sentinel as a smi.
Node* const code = var_code.value();
-#ifdef DEBUG
- {
- Label next(this);
- GotoIfNot(TaggedIsSmi(code), &next);
-
- CSA_ASSERT(this,
- SmiEqual(code, SmiConstant(JSRegExp::kUninitializedValue)));
- Goto(&next);
-
- BIND(&next);
- }
-#endif
+ CSA_ASSERT_BRANCH(this, [=](Label* ok, Label* not_ok) {
+ GotoIfNot(TaggedIsSmi(code), ok);
+ Branch(SmiEqual(code, SmiConstant(JSRegExp::kUninitializedValue)), ok,
+ not_ok);
+ });
GotoIf(TaggedIsSmi(code), &runtime);
CSA_ASSERT(this, HasInstanceType(code, CODE_TYPE));
@@ -475,7 +470,7 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
// Argument 1: Previous index.
MachineType arg1_type = type_int32;
- Node* const arg1 = TruncateWordToWord32(int_last_index);
+ Node* const arg1 = TruncateIntPtrToInt32(int_last_index);
// Argument 2: Start of string data.
MachineType arg2_type = type_ptr;
@@ -582,7 +577,7 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
[=, &var_to_offset](Node* offset) {
Node* const value = Load(MachineType::Int32(),
static_offsets_vector_address, offset);
- Node* const smi_value = SmiFromWord32(value);
+ Node* const smi_value = SmiFromInt32(value);
StoreNoWriteBarrier(MachineRepresentation::kTagged, match_info,
var_to_offset.value(), smi_value);
Increment(&var_to_offset, kPointerSize);
@@ -766,10 +761,9 @@ Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
// ES#sec-regexp.prototype.exec
// RegExp.prototype.exec ( string )
-Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBody(Node* const context,
- Node* const regexp,
- Node* const string,
- const bool is_fastpath) {
+Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBody(
+ Node* const context, Node* const regexp, TNode<String> const string,
+ const bool is_fastpath) {
VARIABLE(var_result, MachineRepresentation::kTagged);
Label if_didnotmatch(this), out(this);
@@ -944,7 +938,7 @@ void RegExpBuiltinsAssembler::BranchIfFastRegExpResult(Node* const context,
// Slow path stub for RegExpPrototypeExec to decrease code size.
TF_BUILTIN(RegExpPrototypeExecSlow, RegExpBuiltinsAssembler) {
Node* const regexp = Parameter(Descriptor::kReceiver);
- Node* const string = Parameter(Descriptor::kString);
+ TNode<String> const string = CAST(Parameter(Descriptor::kString));
Node* const context = Parameter(Descriptor::kContext);
Return(RegExpPrototypeExecBody(context, regexp, string, false));
@@ -1030,7 +1024,7 @@ TF_BUILTIN(RegExpPrototypeExec, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = ToString_Inline(context, maybe_string);
+ TNode<String> const string = ToString_Inline(context, maybe_string);
Label if_isfastpath(this), if_isslowpath(this);
Branch(IsFastRegExpNoPrototype(context, receiver), &if_isfastpath,
@@ -1069,13 +1063,13 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
Node* const flags_smi = LoadObjectField(regexp, JSRegExp::kFlagsOffset);
var_flags = SmiUntag(flags_smi);
-#define CASE_FOR_FLAG(FLAG) \
- do { \
- Label next(this); \
- GotoIfNot(IsSetWord(var_flags, FLAG), &next); \
- var_length = SmiAdd(var_length, SmiConstant(1)); \
- Goto(&next); \
- BIND(&next); \
+#define CASE_FOR_FLAG(FLAG) \
+ do { \
+ Label next(this); \
+ GotoIfNot(IsSetWord(var_flags.value(), FLAG), &next); \
+ var_length = SmiAdd(var_length.value(), SmiConstant(1)); \
+ Goto(&next); \
+ BIND(&next); \
} while (false)
CASE_FOR_FLAG(JSRegExp::kGlobal);
@@ -1099,8 +1093,8 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
Label if_isflagset(this); \
BranchIfToBooleanIsTrue(flag, &if_isflagset, &next); \
BIND(&if_isflagset); \
- var_length = SmiAdd(var_length, SmiConstant(1)); \
- var_flags = Signed(WordOr(var_flags, IntPtrConstant(FLAG))); \
+ var_length = SmiAdd(var_length.value(), SmiConstant(1)); \
+ var_flags = Signed(WordOr(var_flags.value(), IntPtrConstant(FLAG))); \
Goto(&next); \
BIND(&next); \
} while (false)
@@ -1118,7 +1112,7 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
// char for each set flag.
{
- Node* const result = AllocateSeqOneByteString(context, var_length);
+ Node* const result = AllocateSeqOneByteString(context, var_length.value());
VARIABLE(var_offset, MachineType::PointerRepresentation(),
IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
@@ -1126,7 +1120,7 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
#define CASE_FOR_FLAG(FLAG, CHAR) \
do { \
Label next(this); \
- GotoIfNot(IsSetWord(var_flags, FLAG), &next); \
+ GotoIfNot(IsSetWord(var_flags.value(), FLAG), &next); \
Node* const value = Int32Constant(CHAR); \
StoreNoWriteBarrier(MachineRepresentation::kWord8, result, \
var_offset.value(), value); \
@@ -1384,8 +1378,7 @@ TF_BUILTIN(RegExpPrototypeCompile, RegExpBuiltinsAssembler) {
Label next(this);
GotoIf(IsUndefined(maybe_flags), &next);
- Node* const message_id = SmiConstant(MessageTemplate::kRegExpFlags);
- TailCallRuntime(Runtime::kThrowTypeError, context, message_id);
+ ThrowTypeError(context, MessageTemplate::kRegExpFlags);
BIND(&next);
}
@@ -1450,12 +1443,8 @@ TF_BUILTIN(RegExpPrototypeSourceGetter, RegExpBuiltinsAssembler) {
BIND(&if_isnotprototype);
{
- Node* const message_id = SmiConstant(MessageTemplate::kRegExpNonRegExp);
- Node* const method_name_str =
- HeapConstant(isolate->factory()->NewStringFromAsciiChecked(
- "RegExp.prototype.source"));
- TailCallRuntime(Runtime::kThrowTypeError, context, message_id,
- method_name_str);
+ ThrowTypeError(context, MessageTemplate::kRegExpNonRegExp,
+ "RegExp.prototype.source");
}
}
}
@@ -1465,7 +1454,7 @@ Node* RegExpBuiltinsAssembler::FastFlagGetter(Node* const regexp,
JSRegExp::Flag flag) {
Node* const flags = LoadObjectField(regexp, JSRegExp::kFlagsOffset);
Node* const mask = SmiConstant(flag);
- return SmiToWord32(SmiAnd(flags, mask));
+ return SmiToInt32(SmiAnd(flags, mask));
}
// Load through the GetProperty stub.
@@ -1533,8 +1522,6 @@ Node* RegExpBuiltinsAssembler::FlagGetter(Node* const context,
void RegExpBuiltinsAssembler::FlagGetter(Node* context, Node* receiver,
JSRegExp::Flag flag, int counter,
const char* method_name) {
- Isolate* isolate = this->isolate();
-
// Check whether we have an unmodified regexp instance.
Label if_isunmodifiedjsregexp(this),
if_isnotunmodifiedjsregexp(this, Label::kDeferred);
@@ -1573,14 +1560,7 @@ void RegExpBuiltinsAssembler::FlagGetter(Node* context, Node* receiver,
}
BIND(&if_isnotprototype);
- {
- Node* const message_id = SmiConstant(MessageTemplate::kRegExpNonRegExp);
- Node* const method_name_str = HeapConstant(
- isolate->factory()->NewStringFromAsciiChecked(method_name));
- CallRuntime(Runtime::kThrowTypeError, context, message_id,
- method_name_str);
- Unreachable();
- }
+ { ThrowTypeError(context, MessageTemplate::kRegExpNonRegExp, method_name); }
}
}
@@ -1707,7 +1687,7 @@ TF_BUILTIN(RegExpPrototypeTest, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = ToString_Inline(context, maybe_string);
+ TNode<String> const string = ToString_Inline(context, maybe_string);
Label fast_path(this), slow_path(this);
BranchIfFastRegExp(context, receiver, &fast_path, &slow_path);
@@ -1795,163 +1775,14 @@ Node* RegExpBuiltinsAssembler::AdvanceStringIndex(Node* const string,
return var_result.value();
}
-namespace {
-
-// Utility class implementing a growable fixed array through CSA.
-class GrowableFixedArray {
- typedef CodeStubAssembler::Label Label;
- typedef CodeStubAssembler::Variable Variable;
-
- public:
- explicit GrowableFixedArray(CodeStubAssembler* a)
- : assembler_(a),
- var_array_(a, MachineRepresentation::kTagged),
- var_length_(a, MachineType::PointerRepresentation()),
- var_capacity_(a, MachineType::PointerRepresentation()) {
- Initialize();
- }
-
- Node* length() const { return var_length_.value(); }
-
- Variable* var_array() { return &var_array_; }
- Variable* var_length() { return &var_length_; }
- Variable* var_capacity() { return &var_capacity_; }
-
- void Push(Node* const value) {
- CodeStubAssembler* a = assembler_;
-
- Node* const length = var_length_.value();
- Node* const capacity = var_capacity_.value();
-
- Label grow(a), store(a);
- a->Branch(a->IntPtrEqual(capacity, length), &grow, &store);
-
- a->BIND(&grow);
- {
- Node* const new_capacity = NewCapacity(a, capacity);
- Node* const new_array = ResizeFixedArray(length, new_capacity);
-
- var_capacity_.Bind(new_capacity);
- var_array_.Bind(new_array);
- a->Goto(&store);
- }
-
- a->BIND(&store);
- {
- Node* const array = var_array_.value();
- a->StoreFixedArrayElement(array, length, value);
-
- Node* const new_length = a->IntPtrAdd(length, a->IntPtrConstant(1));
- var_length_.Bind(new_length);
- }
- }
-
- Node* ToJSArray(Node* const context) {
- CodeStubAssembler* a = assembler_;
-
- const ElementsKind kind = PACKED_ELEMENTS;
-
- Node* const native_context = a->LoadNativeContext(context);
- Node* const array_map = a->LoadJSArrayElementsMap(kind, native_context);
-
- // Shrink to fit if necessary.
- {
- Label next(a);
-
- Node* const length = var_length_.value();
- Node* const capacity = var_capacity_.value();
-
- a->GotoIf(a->WordEqual(length, capacity), &next);
-
- Node* const array = ResizeFixedArray(length, length);
- var_array_.Bind(array);
- var_capacity_.Bind(length);
- a->Goto(&next);
-
- a->BIND(&next);
- }
-
- Node* const result_length = a->SmiTag(length());
- Node* const result = a->AllocateUninitializedJSArrayWithoutElements(
- array_map, result_length, nullptr);
-
- // Note: We do not currently shrink the fixed array.
-
- a->StoreObjectField(result, JSObject::kElementsOffset, var_array_.value());
-
- return result;
- }
-
- private:
- void Initialize() {
- CodeStubAssembler* a = assembler_;
-
- const ElementsKind kind = PACKED_ELEMENTS;
-
- static const int kInitialArraySize = 8;
- Node* const capacity = a->IntPtrConstant(kInitialArraySize);
- Node* const array = a->AllocateFixedArray(kind, capacity);
-
- a->FillFixedArrayWithValue(kind, array, a->IntPtrConstant(0), capacity,
- Heap::kTheHoleValueRootIndex);
-
- var_array_.Bind(array);
- var_capacity_.Bind(capacity);
- var_length_.Bind(a->IntPtrConstant(0));
- }
-
- Node* NewCapacity(CodeStubAssembler* a,
- compiler::SloppyTNode<IntPtrT> current_capacity) {
- CSA_ASSERT(a, a->IntPtrGreaterThan(current_capacity, a->IntPtrConstant(0)));
-
- // Growth rate is analog to JSObject::NewElementsCapacity:
- // new_capacity = (current_capacity + (current_capacity >> 1)) + 16.
-
- Node* const new_capacity = a->IntPtrAdd(
- a->IntPtrAdd(current_capacity, a->WordShr(current_capacity, 1)),
- a->IntPtrConstant(16));
-
- return new_capacity;
- }
-
- // Creates a new array with {new_capacity} and copies the first
- // {element_count} elements from the current array.
- Node* ResizeFixedArray(Node* const element_count, Node* const new_capacity) {
- CodeStubAssembler* a = assembler_;
-
- CSA_ASSERT(a, a->IntPtrGreaterThan(element_count, a->IntPtrConstant(0)));
- CSA_ASSERT(a, a->IntPtrGreaterThan(new_capacity, a->IntPtrConstant(0)));
- CSA_ASSERT(a, a->IntPtrGreaterThanOrEqual(new_capacity, element_count));
-
- Node* const from_array = var_array_.value();
-
- CodeStubAssembler::ExtractFixedArrayFlags flags;
- flags |= CodeStubAssembler::ExtractFixedArrayFlag::kFixedArrays;
- Node* to_array = a->ExtractFixedArray(from_array, nullptr, element_count,
- new_capacity, flags);
-
- return to_array;
- }
-
- private:
- CodeStubAssembler* const assembler_;
- Variable var_array_;
- Variable var_length_;
- Variable var_capacity_;
-};
-
-} // namespace
-
void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
Node* const regexp,
- Node* const string,
+ TNode<String> string,
const bool is_fastpath) {
- CSA_ASSERT(this, IsString(string));
if (is_fastpath) CSA_ASSERT(this, IsFastRegExp(context, regexp));
Node* const int_zero = IntPtrConstant(0);
Node* const smi_zero = SmiConstant(0);
-
Node* const is_global =
FlagGetter(context, regexp, JSRegExp::kGlobal, is_fastpath);
@@ -1975,7 +1806,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
// Allocate an array to store the resulting match strings.
- GrowableFixedArray array(this);
+ GrowableFixedArray array(state());
// Loop preparations. Within the loop, collect results from RegExpExec
// and store match strings in the array.
@@ -2001,9 +1832,8 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
Node* const match_to = LoadFixedArrayElement(
match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1);
- Node* match = SubString(context, string, match_from, match_to);
- var_match.Bind(match);
-
+ var_match.Bind(
+ SubString(string, SmiUntag(match_from), SmiUntag(match_to)));
Goto(&if_didmatch);
} else {
DCHECK(!is_fastpath);
@@ -2052,7 +1882,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
// Store the match, growing the fixed array if needed.
- array.Push(match);
+ array.Push(CAST(match));
// Advance last index if the match is the empty string.
@@ -2087,7 +1917,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
{
// Wrap the match in a JSArray.
- Node* const result = array.ToJSArray(context);
+ Node* const result = array.ToJSArray(CAST(context));
Return(result);
}
}
@@ -2107,7 +1937,7 @@ TF_BUILTIN(RegExpPrototypeMatch, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = ToString_Inline(context, maybe_string);
+ TNode<String> const string = ToString_Inline(context, maybe_string);
Label fast_path(this), slow_path(this);
BranchIfFastRegExp(context, receiver, &fast_path, &slow_path);
@@ -2126,7 +1956,7 @@ TF_BUILTIN(RegExpPrototypeMatch, RegExpBuiltinsAssembler) {
// 2) pattern is a string
TF_BUILTIN(RegExpMatchFast, RegExpBuiltinsAssembler) {
Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const string = Parameter(Descriptor::kPattern);
+ TNode<String> const string = CAST(Parameter(Descriptor::kPattern));
Node* const context = Parameter(Descriptor::kContext);
RegExpPrototypeMatchBody(context, receiver, string, true);
@@ -2248,7 +2078,7 @@ TF_BUILTIN(RegExpPrototypeSearch, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = ToString_Inline(context, maybe_string);
+ TNode<String> const string = ToString_Inline(context, maybe_string);
Label fast_path(this), slow_path(this);
BranchIfFastRegExp(context, receiver, &fast_path, &slow_path);
@@ -2277,12 +2107,11 @@ TF_BUILTIN(RegExpSearchFast, RegExpBuiltinsAssembler) {
// JSRegExp, {string} is a String, and {limit} is a Smi.
void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
Node* const regexp,
- Node* const string,
+ TNode<String> string,
Node* const limit) {
CSA_ASSERT(this, IsFastRegExp(context, regexp));
CSA_ASSERT(this, Word32BinaryNot(FastFlagGetter(regexp, JSRegExp::kSticky)));
CSA_ASSERT(this, TaggedIsSmi(limit));
- CSA_ASSERT(this, IsString(string));
TNode<Smi> const smi_zero = SmiConstant(0);
TNode<IntPtrT> const int_zero = IntPtrConstant(0);
@@ -2343,7 +2172,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
// Loop preparations.
- GrowableFixedArray array(this);
+ GrowableFixedArray array(state());
VARIABLE(var_last_matched_until, MachineRepresentation::kTagged);
VARIABLE(var_next_search_from, MachineRepresentation::kTagged);
@@ -2422,10 +2251,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
{
Node* const from = last_matched_until;
Node* const to = match_from;
-
- Node* const substr = SubString(context, string, from, to);
- array.Push(substr);
-
+ array.Push(SubString(string, SmiUntag(from), SmiUntag(to)));
GotoIf(WordEqual(array.length(), int_limit), &out);
}
@@ -2462,21 +2288,19 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
BIND(&select_capture);
{
- Node* const substr = SubString(context, string, from, to);
- var_value.Bind(substr);
+ var_value.Bind(SubString(string, SmiUntag(from), SmiUntag(to)));
Goto(&store_value);
}
BIND(&select_undefined);
{
- Node* const undefined = UndefinedConstant();
- var_value.Bind(undefined);
+ var_value.Bind(UndefinedConstant());
Goto(&store_value);
}
BIND(&store_value);
{
- array.Push(var_value.value());
+ array.Push(CAST(var_value.value()));
GotoIf(WordEqual(array.length(), int_limit), &out);
Node* const new_reg = IntPtrAdd(reg, IntPtrConstant(2));
@@ -2499,16 +2323,13 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
{
Node* const from = var_last_matched_until.value();
Node* const to = string_length;
-
- Node* const substr = SubString(context, string, from, to);
- array.Push(substr);
-
+ array.Push(SubString(string, SmiUntag(from), SmiUntag(to)));
Goto(&out);
}
BIND(&out);
{
- Node* const result = array.ToJSArray(context);
+ Node* const result = array.ToJSArray(CAST(context));
Return(result);
}
@@ -2525,12 +2346,11 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
// Helper that skips a few initial checks.
TF_BUILTIN(RegExpSplit, RegExpBuiltinsAssembler) {
Node* const regexp = Parameter(Descriptor::kRegExp);
- Node* const string = Parameter(Descriptor::kString);
+ TNode<String> const string = CAST(Parameter(Descriptor::kString));
Node* const maybe_limit = Parameter(Descriptor::kLimit);
Node* const context = Parameter(Descriptor::kContext);
CSA_ASSERT(this, IsFastRegExp(context, regexp));
- CSA_ASSERT(this, IsString(string));
// TODO(jgruber): Even if map checks send us to the fast path, we still need
// to verify the constructor property and jump to the slow path if it has
@@ -2600,7 +2420,7 @@ TF_BUILTIN(RegExpPrototypeSplit, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = ToString_Inline(context, maybe_string);
+ TNode<String> const string = ToString_Inline(context, maybe_string);
Label stub(this), runtime(this, Label::kDeferred);
BranchIfFastRegExp(context, receiver, &stub, &runtime);
@@ -2700,9 +2520,9 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
Goto(&loop);
BIND(&loop);
{
- GotoIfNot(IntPtrLessThan(var_i, end), &create_result);
+ GotoIfNot(IntPtrLessThan(var_i.value(), end), &create_result);
- Node* const elem = LoadFixedArrayElement(res_elems, var_i);
+ Node* const elem = LoadFixedArrayElement(res_elems, var_i.value());
Label if_issmi(this), if_isstring(this), loop_epilogue(this);
Branch(TaggedIsSmi(elem), &if_issmi, &if_isstring);
@@ -2726,9 +2546,10 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
BIND(&if_isnegativeorzero);
{
- var_i = IntPtrAdd(var_i, int_one);
+ var_i = IntPtrAdd(var_i.value(), int_one);
- Node* const next_elem = LoadFixedArrayElement(res_elems, var_i);
+ Node* const next_elem =
+ LoadFixedArrayElement(res_elems, var_i.value());
var_match_start = SmiSub(next_elem, elem);
Goto(&loop_epilogue);
@@ -2740,13 +2561,14 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
CSA_ASSERT(this, IsString(elem));
Callable call_callable = CodeFactory::Call(isolate);
- TNode<Smi> match_start = var_match_start;
+ TNode<Smi> match_start = var_match_start.value();
Node* const replacement_obj =
CallJS(call_callable, context, replace_callable, undefined, elem,
match_start, string);
- Node* const replacement_str = ToString_Inline(context, replacement_obj);
- StoreFixedArrayElement(res_elems, var_i, replacement_str);
+ TNode<String> const replacement_str =
+ ToString_Inline(context, replacement_obj);
+ StoreFixedArrayElement(res_elems, var_i.value(), replacement_str);
TNode<Smi> const elem_length = LoadStringLengthAsSmi(elem);
var_match_start = SmiAdd(match_start, elem_length);
@@ -2756,7 +2578,7 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
BIND(&loop_epilogue);
{
- var_i = IntPtrAdd(var_i, int_one);
+ var_i = IntPtrAdd(var_i.value(), int_one);
Goto(&loop);
}
}
@@ -2795,7 +2617,7 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
// Overwrite the i'th element in the results with the string
// we got back from the callback function.
- Node* const replacement_str =
+ TNode<String> const replacement_str =
ToString_Inline(context, replacement_obj);
StoreFixedArrayElement(res_elems, index, replacement_str);
@@ -2821,20 +2643,19 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
}
Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
- Node* context, Node* regexp, Node* string, Node* replace_string) {
+ Node* context, Node* regexp, TNode<String> string,
+ TNode<String> replace_string) {
// The fast path is reached only if {receiver} is an unmodified
// JSRegExp instance, {replace_value} is non-callable, and
// ToString({replace_value}) does not contain '$', i.e. we're doing a simple
// string replacement.
+ CSA_ASSERT(this, IsFastRegExp(context, regexp));
+
Node* const smi_zero = SmiConstant(0);
const bool kIsFastPath = true;
- CSA_ASSERT(this, IsFastRegExp(context, regexp));
- CSA_ASSERT(this, IsString(replace_string));
- CSA_ASSERT(this, IsString(string));
-
- VARIABLE(var_result, MachineRepresentation::kTagged, EmptyStringConstant());
+ TVARIABLE(String, var_result, EmptyStringConstant());
VARIABLE(var_match_indices, MachineRepresentation::kTagged);
VARIABLE(var_last_match_end, MachineRepresentation::kTagged, smi_zero);
VARIABLE(var_is_unicode, MachineRepresentation::kWord32, Int32Constant(0));
@@ -2871,22 +2692,21 @@ Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
{
// TODO(jgruber): We could skip many of the checks that using SubString
// here entails.
- Node* const first_part =
- SubString(context, string, var_last_match_end.value(), match_start);
-
- Node* const result = StringAdd(context, var_result.value(), first_part);
- var_result.Bind(result);
+ TNode<String> const first_part =
+ SubString(string, SmiUntag(var_last_match_end.value()),
+ SmiUntag(match_start));
+ var_result = StringAdd(context, var_result.value(), first_part);
Goto(&loop_end);
}
BIND(&if_replaceisnotempty);
{
- Node* const first_part =
- SubString(context, string, var_last_match_end.value(), match_start);
-
- Node* result = StringAdd(context, var_result.value(), first_part);
- result = StringAdd(context, result, replace_string);
- var_result.Bind(result);
+ TNode<String> const first_part =
+ SubString(string, SmiUntag(var_last_match_end.value()),
+ SmiUntag(match_start));
+ TNode<String> result =
+ StringAdd(context, var_result.value(), first_part);
+ var_result = StringAdd(context, result, replace_string);
Goto(&loop_end);
}
@@ -2910,10 +2730,9 @@ Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
BIND(&if_nofurthermatches);
{
TNode<Smi> const string_length = LoadStringLengthAsSmi(string);
- Node* const last_part =
- SubString(context, string, var_last_match_end.value(), string_length);
- Node* const result = StringAdd(context, var_result.value(), last_part);
- var_result.Bind(result);
+ TNode<String> const last_part = SubString(
+ string, SmiUntag(var_last_match_end.value()), SmiUntag(string_length));
+ var_result = StringAdd(context, var_result.value(), last_part);
Goto(&out);
}
@@ -2924,12 +2743,11 @@ Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
// Helper that skips a few initial checks.
TF_BUILTIN(RegExpReplace, RegExpBuiltinsAssembler) {
Node* const regexp = Parameter(Descriptor::kRegExp);
- Node* const string = Parameter(Descriptor::kString);
+ TNode<String> const string = CAST(Parameter(Descriptor::kString));
Node* const replace_value = Parameter(Descriptor::kReplaceValue);
Node* const context = Parameter(Descriptor::kContext);
CSA_ASSERT(this, IsFastRegExp(context, regexp));
- CSA_ASSERT(this, IsString(string));
Label checkreplacestring(this), if_iscallable(this),
runtime(this, Label::kDeferred);
@@ -2942,7 +2760,8 @@ TF_BUILTIN(RegExpReplace, RegExpBuiltinsAssembler) {
// 3. Does ToString({replace_value}) contain '$'?
BIND(&checkreplacestring);
{
- Node* const replace_string = ToString_Inline(context, replace_value);
+ TNode<String> const replace_string =
+ ToString_Inline(context, replace_value);
// ToString(replaceValue) could potentially change the shape of the RegExp
// object. Recheck that we are still on the fast path and bail to runtime
@@ -3028,7 +2847,7 @@ TF_BUILTIN(RegExpPrototypeReplace, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = ToString_Inline(context, maybe_string);
+ TNode<String> const string = ToString_Inline(context, maybe_string);
// Fast-path checks: 1. Is the {receiver} an unmodified JSRegExp instance?
Label stub(this), runtime(this, Label::kDeferred);
@@ -3046,27 +2865,19 @@ TF_BUILTIN(RegExpPrototypeReplace, RegExpBuiltinsAssembler) {
// Simple string matching functionality for internal use which does not modify
// the last match info.
TF_BUILTIN(RegExpInternalMatch, RegExpBuiltinsAssembler) {
- Node* const regexp = Parameter(Descriptor::kRegExp);
- Node* const string = Parameter(Descriptor::kString);
+ TNode<JSRegExp> const regexp = CAST(Parameter(Descriptor::kRegExp));
+ TNode<String> const string = CAST(Parameter(Descriptor::kString));
Node* const context = Parameter(Descriptor::kContext);
Node* const smi_zero = SmiConstant(0);
-
- CSA_ASSERT(this, IsJSRegExp(regexp));
- CSA_ASSERT(this, IsString(string));
-
Node* const native_context = LoadNativeContext(context);
Node* const internal_match_info = LoadContextElement(
native_context, Context::REGEXP_INTERNAL_MATCH_INFO_INDEX);
-
Node* const match_indices = RegExpExecInternal(context, regexp, string,
smi_zero, internal_match_info);
-
Node* const null = NullConstant();
- Label if_matched(this), if_didnotmatch(this);
- Branch(WordEqual(match_indices, null), &if_didnotmatch, &if_matched);
-
- BIND(&if_didnotmatch);
+ Label if_matched(this);
+ GotoIfNot(WordEqual(match_indices, null), &if_matched);
Return(null);
BIND(&if_matched);
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.h b/deps/v8/src/builtins/builtins-regexp-gen.h
index c8a94b7293..b57b90acf9 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.h
+++ b/deps/v8/src/builtins/builtins-regexp-gen.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_BUILTINS_BUILTINS_REGEXP_H_
-#define V8_BUILTINS_BUILTINS_REGEXP_H_
+#ifndef V8_BUILTINS_BUILTINS_REGEXP_GEN_H_
+#define V8_BUILTINS_BUILTINS_REGEXP_GEN_H_
#include "src/code-stub-assembler.h"
@@ -50,7 +50,7 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
Node* ConstructNewResultFromMatchInfo(Node* const context, Node* const regexp,
Node* const match_info,
- Node* const string);
+ TNode<String> const string);
Node* RegExpPrototypeExecBodyWithoutResult(Node* const context,
Node* const regexp,
@@ -58,7 +58,7 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
Label* if_didnotmatch,
const bool is_fastpath);
Node* RegExpPrototypeExecBody(Node* const context, Node* const regexp,
- Node* const string, const bool is_fastpath);
+ TNode<String> string, const bool is_fastpath);
Node* ThrowIfNotJSReceiver(Node* context, Node* maybe_receiver,
MessageTemplate::Template msg_template,
@@ -100,7 +100,8 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
Node* const is_unicode, bool is_fastpath);
void RegExpPrototypeMatchBody(Node* const context, Node* const regexp,
- Node* const string, const bool is_fastpath);
+ TNode<String> const string,
+ const bool is_fastpath);
void RegExpPrototypeSearchBodyFast(Node* const context, Node* const regexp,
Node* const string);
@@ -108,15 +109,16 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
Node* const string);
void RegExpPrototypeSplitBody(Node* const context, Node* const regexp,
- Node* const string, Node* const limit);
+ TNode<String> const string, Node* const limit);
Node* ReplaceGlobalCallableFastPath(Node* context, Node* regexp, Node* string,
Node* replace_callable);
- Node* ReplaceSimpleStringFastPath(Node* context, Node* regexp, Node* string,
- Node* replace_string);
+ Node* ReplaceSimpleStringFastPath(Node* context, Node* regexp,
+ TNode<String> string,
+ TNode<String> replace_string);
};
} // namespace internal
} // namespace v8
-#endif // V8_BUILTINS_BUILTINS_REGEXP_H_
+#endif // V8_BUILTINS_BUILTINS_REGEXP_GEN_H_
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
index 278a48c68e..2c9f0791da 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
@@ -69,9 +69,8 @@ void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray(
BIND(&invalid);
{
- CallRuntime(Runtime::kThrowNotIntegerSharedTypedArrayError, context,
- tagged);
- Unreachable();
+ ThrowTypeError(context, MessageTemplate::kNotIntegerSharedTypedArray,
+ tagged);
}
BIND(&not_float_or_clamped);
@@ -96,15 +95,12 @@ Node* SharedArrayBufferBuiltinsAssembler::ConvertTaggedAtomicIndexToWord32(
// The |number_index| output parameter is used only for architectures that
// don't currently have a TF implementation and forward to runtime functions
// instead; they expect the value has already been coerced to an integer.
- *number_index = ToSmiIndex(tagged, context, &range_error);
- var_result.Bind(SmiToWord32(*number_index));
+ *number_index = ToSmiIndex(CAST(tagged), CAST(context), &range_error);
+ var_result.Bind(SmiToInt32(*number_index));
Goto(&done);
BIND(&range_error);
- {
- CallRuntime(Runtime::kThrowInvalidAtomicAccessIndexError, context);
- Unreachable();
- }
+ { ThrowRangeError(context, MessageTemplate::kInvalidAtomicAccessIndex); }
BIND(&done);
return var_result.value();
@@ -119,8 +115,7 @@ void SharedArrayBufferBuiltinsAssembler::ValidateAtomicIndex(Node* array,
context, LoadObjectField(array, JSTypedArray::kLengthOffset));
GotoIf(Uint32LessThan(index_word, array_length_word32), &check_passed);
- CallRuntime(Runtime::kThrowInvalidAtomicAccessIndexError, context);
- Unreachable();
+ ThrowRangeError(context, MessageTemplate::kInvalidAtomicAccessIndex);
BIND(&check_passed);
}
@@ -169,20 +164,20 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
arraysize(case_labels));
BIND(&i8);
- Return(SmiFromWord32(
- AtomicLoad(MachineType::Int8(), backing_store, index_word)));
+ Return(
+ SmiFromInt32(AtomicLoad(MachineType::Int8(), backing_store, index_word)));
BIND(&u8);
- Return(SmiFromWord32(
+ Return(SmiFromInt32(
AtomicLoad(MachineType::Uint8(), backing_store, index_word)));
BIND(&i16);
- Return(SmiFromWord32(
+ Return(SmiFromInt32(
AtomicLoad(MachineType::Int16(), backing_store, WordShl(index_word, 1))));
BIND(&u16);
- Return(SmiFromWord32(AtomicLoad(MachineType::Uint16(), backing_store,
- WordShl(index_word, 1))));
+ Return(SmiFromInt32(AtomicLoad(MachineType::Uint16(), backing_store,
+ WordShl(index_word, 1))));
BIND(&i32);
Return(ChangeInt32ToTagged(
@@ -293,20 +288,20 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
arraysize(case_labels));
BIND(&i8);
- Return(SmiFromWord32(AtomicExchange(MachineType::Int8(), backing_store,
- index_word, value_word32)));
+ Return(SmiFromInt32(AtomicExchange(MachineType::Int8(), backing_store,
+ index_word, value_word32)));
BIND(&u8);
- Return(SmiFromWord32(AtomicExchange(MachineType::Uint8(), backing_store,
- index_word, value_word32)));
+ Return(SmiFromInt32(AtomicExchange(MachineType::Uint8(), backing_store,
+ index_word, value_word32)));
BIND(&i16);
- Return(SmiFromWord32(AtomicExchange(MachineType::Int16(), backing_store,
- WordShl(index_word, 1), value_word32)));
+ Return(SmiFromInt32(AtomicExchange(MachineType::Int16(), backing_store,
+ WordShl(index_word, 1), value_word32)));
BIND(&u16);
- Return(SmiFromWord32(AtomicExchange(MachineType::Uint16(), backing_store,
- WordShl(index_word, 1), value_word32)));
+ Return(SmiFromInt32(AtomicExchange(MachineType::Uint16(), backing_store,
+ WordShl(index_word, 1), value_word32)));
BIND(&i32);
Return(ChangeInt32ToTagged(AtomicExchange(MachineType::Int32(), backing_store,
@@ -371,22 +366,22 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
arraysize(case_labels));
BIND(&i8);
- Return(SmiFromWord32(AtomicCompareExchange(MachineType::Int8(), backing_store,
- index_word, old_value_word32,
- new_value_word32)));
+ Return(SmiFromInt32(AtomicCompareExchange(MachineType::Int8(), backing_store,
+ index_word, old_value_word32,
+ new_value_word32)));
BIND(&u8);
- Return(SmiFromWord32(
- AtomicCompareExchange(MachineType::Uint8(), backing_store, index_word,
- old_value_word32, new_value_word32)));
+ Return(SmiFromInt32(AtomicCompareExchange(MachineType::Uint8(), backing_store,
+ index_word, old_value_word32,
+ new_value_word32)));
BIND(&i16);
- Return(SmiFromWord32(AtomicCompareExchange(
+ Return(SmiFromInt32(AtomicCompareExchange(
MachineType::Int16(), backing_store, WordShl(index_word, 1),
old_value_word32, new_value_word32)));
BIND(&u16);
- Return(SmiFromWord32(AtomicCompareExchange(
+ Return(SmiFromInt32(AtomicCompareExchange(
MachineType::Uint16(), backing_store, WordShl(index_word, 1),
old_value_word32, new_value_word32)));
@@ -468,22 +463,20 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
arraysize(case_labels));
BIND(&i8);
- Return(SmiFromWord32((this->*function)(MachineType::Int8(), backing_store,
- index_word, value_word32)));
+ Return(SmiFromInt32((this->*function)(MachineType::Int8(), backing_store,
+ index_word, value_word32)));
BIND(&u8);
- Return(SmiFromWord32((this->*function)(MachineType::Uint8(), backing_store,
- index_word, value_word32)));
+ Return(SmiFromInt32((this->*function)(MachineType::Uint8(), backing_store,
+ index_word, value_word32)));
BIND(&i16);
- Return(
- SmiFromWord32((this->*function)(MachineType::Int16(), backing_store,
- WordShl(index_word, 1), value_word32)));
+ Return(SmiFromInt32((this->*function)(MachineType::Int16(), backing_store,
+ WordShl(index_word, 1), value_word32)));
BIND(&u16);
- Return(
- SmiFromWord32((this->*function)(MachineType::Uint16(), backing_store,
- WordShl(index_word, 1), value_word32)));
+ Return(SmiFromInt32((this->*function)(MachineType::Uint16(), backing_store,
+ WordShl(index_word, 1), value_word32)));
BIND(&i32);
Return(ChangeInt32ToTagged(
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index 195572de8e..5cc4621b84 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -124,42 +124,6 @@ Node* StringBuiltinsAssembler::PointerToStringDataAtIndex(
return IntPtrAdd(string_data, offset_in_bytes);
}
-void StringBuiltinsAssembler::ConvertAndBoundsCheckStartArgument(
- Node* context, Variable* var_start, Node* start, Node* string_length) {
- TNode<Object> const start_int = ToInteger_Inline(
- CAST(context), CAST(start), CodeStubAssembler::kTruncateMinusZero);
- TNode<Smi> const zero = SmiConstant(0);
-
- Label done(this);
- Label if_issmi(this), if_isheapnumber(this, Label::kDeferred);
- Branch(TaggedIsSmi(start_int), &if_issmi, &if_isheapnumber);
-
- BIND(&if_issmi);
- {
- TNode<Smi> const start_int_smi = CAST(start_int);
- var_start->Bind(Select(
- SmiLessThan(start_int_smi, zero),
- [&] { return SmiMax(SmiAdd(string_length, start_int_smi), zero); },
- [&] { return start_int_smi; }, MachineRepresentation::kTagged));
- Goto(&done);
- }
-
- BIND(&if_isheapnumber);
- {
- // If {start} is a heap number, it is definitely out of bounds. If it is
- // negative, {start} = max({string_length} + {start}),0) = 0'. If it is
- // positive, set {start} to {string_length} which ultimately results in
- // returning an empty string.
- TNode<HeapNumber> const start_int_hn = CAST(start_int);
- TNode<Float64T> const float_zero = Float64Constant(0.);
- TNode<Float64T> const start_float = LoadHeapNumberValue(start_int_hn);
- var_start->Bind(SelectTaggedConstant<Smi>(
- Float64LessThan(start_float, float_zero), zero, string_length));
- Goto(&done);
- }
- BIND(&done);
-}
-
void StringBuiltinsAssembler::GenerateStringEqual(Node* context, Node* left,
Node* right) {
VARIABLE(var_left, MachineRepresentation::kTagged, left);
@@ -300,21 +264,23 @@ void StringBuiltinsAssembler::StringEqual_Loop(
{
// If {offset} equals {end}, no difference was found, so the
// strings are equal.
- GotoIf(WordEqual(var_offset, length), if_equal);
+ GotoIf(WordEqual(var_offset.value(), length), if_equal);
// Load the next characters from {lhs} and {rhs}.
Node* lhs_value =
Load(lhs_type, lhs_data,
- WordShl(var_offset, ElementSizeLog2Of(lhs_type.representation())));
+ WordShl(var_offset.value(),
+ ElementSizeLog2Of(lhs_type.representation())));
Node* rhs_value =
Load(rhs_type, rhs_data,
- WordShl(var_offset, ElementSizeLog2Of(rhs_type.representation())));
+ WordShl(var_offset.value(),
+ ElementSizeLog2Of(rhs_type.representation())));
// Check if the characters match.
GotoIf(Word32NotEqual(lhs_value, rhs_value), if_not_equal);
// Advance to next character.
- var_offset = IntPtrAdd(var_offset, IntPtrConstant(1));
+ var_offset = IntPtrAdd(var_offset.value(), IntPtrConstant(1));
Goto(&loop);
}
}
@@ -408,13 +374,13 @@ void StringBuiltinsAssembler::GenerateStringRelationalComparison(Node* context,
{
// Check if {offset} equals {end}.
Label if_done(this), if_notdone(this);
- Branch(WordEqual(var_offset, end), &if_done, &if_notdone);
+ Branch(WordEqual(var_offset.value(), end), &if_done, &if_notdone);
BIND(&if_notdone);
{
// Load the next characters from {lhs} and {rhs}.
- Node* lhs_value = Load(MachineType::Uint8(), lhs, var_offset);
- Node* rhs_value = Load(MachineType::Uint8(), rhs, var_offset);
+ Node* lhs_value = Load(MachineType::Uint8(), lhs, var_offset.value());
+ Node* rhs_value = Load(MachineType::Uint8(), rhs, var_offset.value());
// Check if the characters match.
Label if_valueissame(this), if_valueisnotsame(this);
@@ -424,7 +390,7 @@ void StringBuiltinsAssembler::GenerateStringRelationalComparison(Node* context,
BIND(&if_valueissame);
{
// Advance to next character.
- var_offset = IntPtrAdd(var_offset, IntPtrConstant(1));
+ var_offset = IntPtrAdd(var_offset.value(), IntPtrConstant(1));
}
Goto(&loop);
@@ -563,20 +529,21 @@ TF_BUILTIN(StringCharAt, StringBuiltinsAssembler) {
Return(result);
}
-TF_BUILTIN(StringCharCodeAt, StringBuiltinsAssembler) {
+TF_BUILTIN(StringCodePointAtUTF16, StringBuiltinsAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* position = Parameter(Descriptor::kPosition);
-
+ // TODO(sigurds) Figure out if passing length as argument pays off.
+ TNode<IntPtrT> length = LoadStringLengthAsWord(receiver);
// Load the character code at the {position} from the {receiver}.
- TNode<Int32T> code = StringCharCodeAt(receiver, position);
-
+ TNode<Int32T> code =
+ LoadSurrogatePairAt(receiver, length, position, UnicodeEncoding::UTF16);
// And return it as TaggedSigned value.
// TODO(turbofan): Allow builtins to return values untagged.
- TNode<Smi> result = SmiFromWord32(code);
+ TNode<Smi> result = SmiFromInt32(code);
Return(result);
}
-TF_BUILTIN(StringCodePointAt, StringBuiltinsAssembler) {
+TF_BUILTIN(StringCodePointAtUTF32, StringBuiltinsAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* position = Parameter(Descriptor::kPosition);
@@ -587,7 +554,7 @@ TF_BUILTIN(StringCodePointAt, StringBuiltinsAssembler) {
LoadSurrogatePairAt(receiver, length, position, UnicodeEncoding::UTF32);
// And return it as TaggedSigned value.
// TODO(turbofan): Allow builtins to return values untagged.
- TNode<Smi> result = SmiFromWord32(code);
+ TNode<Smi> result = SmiFromInt32(code);
Return(result);
}
@@ -648,11 +615,12 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
// The {code16} fits into the SeqOneByteString {one_byte_result}.
Node* offset = ElementOffsetFromIndex(
- var_max_index, UINT8_ELEMENTS, CodeStubAssembler::INTPTR_PARAMETERS,
+ var_max_index.value(), UINT8_ELEMENTS,
+ CodeStubAssembler::INTPTR_PARAMETERS,
SeqOneByteString::kHeaderSize - kHeapObjectTag);
StoreNoWriteBarrier(MachineRepresentation::kWord8, one_byte_result,
offset, code16);
- var_max_index = IntPtrAdd(var_max_index, IntPtrConstant(1));
+ var_max_index = IntPtrAdd(var_max_index.value(), IntPtrConstant(1));
});
arguments.PopAndReturn(one_byte_result);
@@ -667,16 +635,17 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
// their corresponding positions in the new 16-bit string.
TNode<IntPtrT> zero = IntPtrConstant(0);
CopyStringCharacters(one_byte_result, two_byte_result, zero, zero,
- var_max_index, String::ONE_BYTE_ENCODING,
+ var_max_index.value(), String::ONE_BYTE_ENCODING,
String::TWO_BYTE_ENCODING);
// Write the character that caused the 8-bit to 16-bit fault.
- Node* max_index_offset = ElementOffsetFromIndex(
- var_max_index, UINT16_ELEMENTS, CodeStubAssembler::INTPTR_PARAMETERS,
- SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+ Node* max_index_offset =
+ ElementOffsetFromIndex(var_max_index.value(), UINT16_ELEMENTS,
+ CodeStubAssembler::INTPTR_PARAMETERS,
+ SeqTwoByteString::kHeaderSize - kHeapObjectTag);
StoreNoWriteBarrier(MachineRepresentation::kWord16, two_byte_result,
max_index_offset, code16);
- var_max_index = IntPtrAdd(var_max_index, IntPtrConstant(1));
+ var_max_index = IntPtrAdd(var_max_index.value(), IntPtrConstant(1));
// Resume copying the passed-in arguments from the same place where the
// 8-bit copy stopped, but this time copying over all of the characters
@@ -689,14 +658,14 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit));
Node* offset = ElementOffsetFromIndex(
- var_max_index, UINT16_ELEMENTS,
+ var_max_index.value(), UINT16_ELEMENTS,
CodeStubAssembler::INTPTR_PARAMETERS,
SeqTwoByteString::kHeaderSize - kHeapObjectTag);
StoreNoWriteBarrier(MachineRepresentation::kWord16, two_byte_result,
offset, code16);
- var_max_index = IntPtrAdd(var_max_index, IntPtrConstant(1));
+ var_max_index = IntPtrAdd(var_max_index.value(), IntPtrConstant(1));
},
- var_max_index);
+ var_max_index.value());
arguments.PopAndReturn(two_byte_result);
}
@@ -728,7 +697,7 @@ TF_BUILTIN(StringPrototypeCharCodeAt, StringBuiltinsAssembler) {
[this](TNode<String> receiver, TNode<IntPtrT> length,
TNode<IntPtrT> index) {
Node* value = StringCharCodeAt(receiver, index);
- return SmiFromWord32(value);
+ return SmiFromInt32(value);
});
}
@@ -742,9 +711,11 @@ TF_BUILTIN(StringPrototypeCodePointAt, StringBuiltinsAssembler) {
maybe_position, UndefinedConstant(),
[this](TNode<String> receiver, TNode<IntPtrT> length,
TNode<IntPtrT> index) {
+        // This is always a call to a builtin from JavaScript,
+ // so we need to produce UTF32.
Node* value = LoadSurrogatePairAt(receiver, length, index,
UnicodeEncoding::UTF32);
- return SmiFromWord32(value);
+ return SmiFromInt32(value);
});
}
@@ -1044,8 +1015,8 @@ void StringBuiltinsAssembler::RequireObjectCoercible(Node* const context,
Branch(IsNullOrUndefined(value), &throw_exception, &out);
BIND(&throw_exception);
- TailCallRuntime(Runtime::kThrowCalledOnNullOrUndefined, context,
- StringConstant(method_name));
+ ThrowTypeError(context, MessageTemplate::kCalledOnNullOrUndefined,
+ method_name);
BIND(&out);
}
@@ -1173,8 +1144,8 @@ compiler::Node* StringBuiltinsAssembler::GetSubstitution(
CSA_ASSERT(this, TaggedIsPositiveSmi(dollar_index));
Node* const matched =
- CallBuiltin(Builtins::kSubString, context, subject_string,
- match_start_index, match_end_index);
+ CallBuiltin(Builtins::kStringSubstring, context, subject_string,
+ SmiUntag(match_start_index), SmiUntag(match_end_index));
Node* const replacement_string =
CallRuntime(Runtime::kGetSubstitution, context, matched, subject_string,
match_start_index, replace_string, dollar_index);
@@ -1242,11 +1213,10 @@ TF_BUILTIN(StringPrototypeRepeat, StringBuiltinsAssembler) {
BIND(&invalid_count);
{
- CallRuntime(Runtime::kThrowRangeError, context,
- SmiConstant(MessageTemplate::kInvalidCountValue),
- var_count.value());
- Unreachable();
+ ThrowRangeError(context, MessageTemplate::kInvalidCountValue,
+ var_count.value());
}
+
BIND(&invalid_string_length);
{
CallRuntime(Runtime::kThrowInvalidStringLength, context);
@@ -1288,7 +1258,7 @@ TF_BUILTIN(StringRepeat, StringBuiltinsAssembler) {
{
{
Label next(this);
- GotoIfNot(SmiToWord32(SmiAnd(var_count.value(), SmiConstant(1))), &next);
+ GotoIfNot(SmiToInt32(SmiAnd(var_count.value(), SmiConstant(1))), &next);
var_result.Bind(CallStub(stringadd_callable, context, var_result.value(),
var_temp.value()));
Goto(&next);
@@ -1412,8 +1382,8 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
GotoIf(SmiEqual(match_start_index, smi_zero), &next);
Node* const prefix =
- CallBuiltin(Builtins::kSubString, context, subject_string, smi_zero,
- match_start_index);
+ CallBuiltin(Builtins::kStringSubstring, context, subject_string,
+ IntPtrConstant(0), SmiUntag(match_start_index));
var_result.Bind(prefix);
Goto(&next);
@@ -1453,8 +1423,8 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
BIND(&out);
{
Node* const suffix =
- CallBuiltin(Builtins::kSubString, context, subject_string,
- match_end_index, subject_length);
+ CallBuiltin(Builtins::kStringSubstring, context, subject_string,
+ SmiUntag(match_end_index), SmiUntag(subject_length));
Node* const result =
CallStub(stringadd_callable, context, var_result.value(), suffix);
Return(result);
@@ -1587,14 +1557,15 @@ class StringPadAssembler : public StringBuiltinsAssembler {
GotoIf(IsUndefined(fill), &pad);
var_fill_string = ToString_Inline(context, fill);
- var_fill_length = LoadStringLengthAsWord(var_fill_string);
+ var_fill_length = LoadStringLengthAsWord(var_fill_string.value());
- Branch(IntPtrGreaterThan(var_fill_length, IntPtrConstant(0)), &pad,
- &dont_pad);
+ Branch(IntPtrGreaterThan(var_fill_length.value(), IntPtrConstant(0)),
+ &pad, &dont_pad);
}
BIND(&pad);
{
- CSA_ASSERT(this, IntPtrGreaterThan(var_fill_length, IntPtrConstant(0)));
+ CSA_ASSERT(this,
+ IntPtrGreaterThan(var_fill_length.value(), IntPtrConstant(0)));
CSA_ASSERT(this, SmiGreaterThan(max_length, string_length));
Callable stringadd_callable =
@@ -1604,38 +1575,37 @@ class StringPadAssembler : public StringBuiltinsAssembler {
VARIABLE(var_pad, MachineRepresentation::kTagged);
Label single_char_fill(this), multi_char_fill(this), return_result(this);
- Branch(IntPtrEqual(var_fill_length, IntPtrConstant(1)), &single_char_fill,
- &multi_char_fill);
+ Branch(IntPtrEqual(var_fill_length.value(), IntPtrConstant(1)),
+ &single_char_fill, &multi_char_fill);
// Fast path for a single character fill. No need to calculate number of
// repetitions or remainder.
BIND(&single_char_fill);
{
var_pad.Bind(CallBuiltin(Builtins::kStringRepeat, context,
- static_cast<Node*>(var_fill_string),
+ static_cast<Node*>(var_fill_string.value()),
pad_length));
Goto(&return_result);
}
BIND(&multi_char_fill);
{
TNode<Int32T> const fill_length_word32 =
- TruncateWordToWord32(var_fill_length);
- TNode<Int32T> const pad_length_word32 = SmiToWord32(pad_length);
+ TruncateIntPtrToInt32(var_fill_length.value());
+ TNode<Int32T> const pad_length_word32 = SmiToInt32(pad_length);
TNode<Int32T> const repetitions_word32 =
Int32Div(pad_length_word32, fill_length_word32);
TNode<Int32T> const remaining_word32 =
Int32Mod(pad_length_word32, fill_length_word32);
var_pad.Bind(CallBuiltin(Builtins::kStringRepeat, context,
- static_cast<Node*>(var_fill_string),
- SmiFromWord32(repetitions_word32)));
+ var_fill_string.value(),
+ SmiFromInt32(repetitions_word32)));
GotoIfNot(remaining_word32, &return_result);
{
- Node* const remainder_string =
- CallBuiltin(Builtins::kSubString, context,
- static_cast<Node*>(var_fill_string), SmiConstant(0),
- SmiFromWord32(remaining_word32));
+ Node* const remainder_string = CallBuiltin(
+ Builtins::kStringSubstring, context, var_fill_string.value(),
+ IntPtrConstant(0), ChangeInt32ToIntPtr(remaining_word32));
var_pad.Bind(CallStub(stringadd_callable, context, var_pad.value(),
remainder_string));
Goto(&return_result);
@@ -1679,8 +1649,8 @@ TF_BUILTIN(StringPrototypeSearch, StringMatchSearchAssembler) {
// ES6 section 21.1.3.18 String.prototype.slice ( start, end )
TF_BUILTIN(StringPrototypeSlice, StringBuiltinsAssembler) {
Label out(this);
- VARIABLE(var_start, MachineRepresentation::kTagged);
- VARIABLE(var_end, MachineRepresentation::kTagged);
+ TVARIABLE(IntPtrT, var_start);
+ TVARIABLE(IntPtrT, var_end);
const int kStart = 0;
const int kEnd = 1;
@@ -1688,69 +1658,38 @@ TF_BUILTIN(StringPrototypeSlice, StringBuiltinsAssembler) {
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
Node* const receiver = args.GetReceiver();
- Node* const start = args.GetOptionalArgumentValue(kStart);
- TNode<Object> end = CAST(args.GetOptionalArgumentValue(kEnd));
+ TNode<Object> start = args.GetOptionalArgumentValue(kStart);
+ TNode<Object> end = args.GetOptionalArgumentValue(kEnd);
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
- TNode<Smi> const smi_zero = SmiConstant(0);
-
// 1. Let O be ? RequireObjectCoercible(this value).
RequireObjectCoercible(context, receiver, "String.prototype.slice");
// 2. Let S be ? ToString(O).
- Node* const subject_string =
- CallBuiltin(Builtins::kToString, context, receiver);
+ TNode<String> const subject_string =
+ CAST(CallBuiltin(Builtins::kToString, context, receiver));
// 3. Let len be the number of elements in S.
- TNode<Smi> const length = LoadStringLengthAsSmi(subject_string);
+ TNode<IntPtrT> const length = LoadStringLengthAsWord(subject_string);
- // Conversions and bounds-checks for {start}.
- ConvertAndBoundsCheckStartArgument(context, &var_start, start, length);
+ // Convert {start} to a relative index.
+ var_start = ConvertToRelativeIndex(context, start, length);
// 5. If end is undefined, let intEnd be len;
- var_end.Bind(length);
+ var_end = length;
GotoIf(IsUndefined(end), &out);
- // else let intEnd be ? ToInteger(end).
- Node* const end_int =
- ToInteger_Inline(context, end, CodeStubAssembler::kTruncateMinusZero);
-
- // 7. If intEnd < 0, let to be max(len + intEnd, 0);
- // otherwise let to be min(intEnd, len).
- Label if_issmi(this), if_isheapnumber(this, Label::kDeferred);
- Branch(TaggedIsSmi(end_int), &if_issmi, &if_isheapnumber);
-
- BIND(&if_issmi);
- {
- Node* const length_plus_end = SmiAdd(length, end_int);
- var_end.Bind(Select(SmiLessThan(end_int, smi_zero),
- [&] { return SmiMax(length_plus_end, smi_zero); },
- [&] { return SmiMin(length, end_int); },
- MachineRepresentation::kTagged));
- Goto(&out);
- }
-
- BIND(&if_isheapnumber);
- {
- // If {end} is a heap number, it is definitely out of bounds. If it is
- // negative, {int_end} = max({length} + {int_end}),0) = 0'. If it is
- // positive, set {int_end} to {length} which ultimately results in
- // returning an empty string.
- Node* const float_zero = Float64Constant(0.);
- Node* const end_float = LoadHeapNumberValue(end_int);
- var_end.Bind(SelectTaggedConstant<Smi>(
- Float64LessThan(end_float, float_zero), smi_zero, length));
- Goto(&out);
- }
+ // Convert {end} to a relative index.
+ var_end = ConvertToRelativeIndex(context, end, length);
+ Goto(&out);
Label return_emptystring(this);
BIND(&out);
{
- GotoIf(SmiLessThanOrEqual(var_end.value(), var_start.value()),
+ GotoIf(IntPtrLessThanOrEqual(var_end.value(), var_start.value()),
&return_emptystring);
- Node* const result =
- SubString(context, subject_string, var_start.value(), var_end.value(),
- SubStringFlags::FROM_TO_ARE_BOUNDED);
+ TNode<String> const result =
+ SubString(subject_string, var_start.value(), var_end.value());
args.PopAndReturn(result);
}
@@ -1868,25 +1807,25 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
CodeStubArguments args(this, argc);
Node* const receiver = args.GetReceiver();
- Node* const start = args.GetOptionalArgumentValue(kStartArg);
- TNode<Object> length = CAST(args.GetOptionalArgumentValue(kLengthArg));
+ TNode<Object> start = args.GetOptionalArgumentValue(kStartArg);
+ TNode<Object> length = args.GetOptionalArgumentValue(kLengthArg);
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Label out(this);
- TVARIABLE(Smi, var_start);
+ TVARIABLE(IntPtrT, var_start);
TVARIABLE(Number, var_length);
- TNode<Smi> const zero = SmiConstant(0);
+ TNode<IntPtrT> const zero = IntPtrConstant(0);
// Check that {receiver} is coercible to Object and convert it to a String.
- Node* const string =
+ TNode<String> const string =
ToThisString(context, receiver, "String.prototype.substr");
- TNode<Smi> const string_length = LoadStringLengthAsSmi(string);
+ TNode<IntPtrT> const string_length = LoadStringLengthAsWord(string);
- // Conversions and bounds-checks for {start}.
- ConvertAndBoundsCheckStartArgument(context, &var_start, start, string_length);
+ // Convert {start} to a relative index.
+ var_start = ConvertToRelativeIndex(context, start, string_length);
// Conversions and bounds-checks for {length}.
Label if_issmi(this), if_isheapnumber(this, Label::kDeferred);
@@ -1897,7 +1836,7 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
Branch(IsUndefined(length), &if_isundefined, &if_isnotundefined);
BIND(&if_isundefined);
- var_length = string_length;
+ var_length = SmiTag(string_length);
Goto(&if_issmi);
BIND(&if_isnotundefined);
@@ -1905,18 +1844,20 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
CodeStubAssembler::kTruncateMinusZero);
}
- TVARIABLE(Smi, var_result_length);
+ TVARIABLE(IntPtrT, var_result_length);
- Branch(TaggedIsSmi(var_length), &if_issmi, &if_isheapnumber);
+ Branch(TaggedIsSmi(var_length.value()), &if_issmi, &if_isheapnumber);
// Set {length} to min(max({length}, 0), {string_length} - {start}
BIND(&if_issmi);
{
- TNode<Smi> const positive_length = SmiMax(CAST(var_length), zero);
- TNode<Smi> const minimal_length = SmiSub(string_length, var_start);
- var_result_length = SmiMin(positive_length, minimal_length);
+ TNode<IntPtrT> const positive_length =
+ IntPtrMax(SmiUntag(CAST(var_length.value())), zero);
+ TNode<IntPtrT> const minimal_length =
+ IntPtrSub(string_length, var_start.value());
+ var_result_length = IntPtrMin(positive_length, minimal_length);
- GotoIfNot(SmiLessThanOrEqual(var_result_length, zero), &out);
+ GotoIfNot(IntPtrLessThanOrEqual(var_result_length.value(), zero), &out);
args.PopAndReturn(EmptyStringConstant());
}
@@ -1926,11 +1867,12 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
// two cases according to the spec: if it is negative, "" is returned; if
// it is positive, then length is set to {string_length} - {start}.
- CSA_ASSERT(this, IsHeapNumber(var_length));
+ CSA_ASSERT(this, IsHeapNumber(var_length.value()));
Label if_isnegative(this), if_ispositive(this);
TNode<Float64T> const float_zero = Float64Constant(0.);
- TNode<Float64T> const length_float = LoadHeapNumberValue(CAST(var_length));
+ TNode<Float64T> const length_float =
+ LoadHeapNumberValue(CAST(var_length.value()));
Branch(Float64LessThan(length_float, float_zero), &if_isnegative,
&if_ispositive);
@@ -1939,17 +1881,17 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
BIND(&if_ispositive);
{
- var_result_length = SmiSub(string_length, var_start);
- GotoIfNot(SmiLessThanOrEqual(var_result_length, zero), &out);
+ var_result_length = IntPtrSub(string_length, var_start.value());
+ GotoIfNot(IntPtrLessThanOrEqual(var_result_length.value(), zero), &out);
args.PopAndReturn(EmptyStringConstant());
}
}
BIND(&out);
{
- TNode<Smi> const end = SmiAdd(var_start, var_result_length);
- Node* const result = SubString(context, string, var_start, end);
- args.PopAndReturn(result);
+ TNode<IntPtrT> const end =
+ IntPtrAdd(var_start.value(), var_result_length.value());
+ args.PopAndReturn(SubString(string, var_start.value(), end));
}
}
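For substr, the result length computed above is clamped in two steps: the requested length is floored at zero and then capped by the characters remaining after {start}. A hedged standalone C++ sketch (names are mine, not V8's):

#include <algorithm>
#include <cstdint>

// min(max(requested, 0), string_length - start); a non-positive result
// means the builtin returns the empty string.
int64_t SubstrResultLength(int64_t requested, int64_t string_length,
                           int64_t start) {
  return std::min(std::max<int64_t>(requested, 0), string_length - start);
}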
@@ -1959,7 +1901,7 @@ TNode<Smi> StringBuiltinsAssembler::ToSmiBetweenZeroAnd(
Label out(this);
TVARIABLE(Smi, var_result);
- TNode<Object> const value_int =
+ TNode<Number> const value_int =
ToInteger_Inline(context, value, CodeStubAssembler::kTruncateMinusZero);
Label if_issmi(this), if_isnotsmi(this, Label::kDeferred);
@@ -1967,8 +1909,9 @@ TNode<Smi> StringBuiltinsAssembler::ToSmiBetweenZeroAnd(
BIND(&if_issmi);
{
+ TNode<Smi> value_smi = CAST(value_int);
Label if_isinbounds(this), if_isoutofbounds(this, Label::kDeferred);
- Branch(SmiAbove(value_int, limit), &if_isoutofbounds, &if_isinbounds);
+ Branch(SmiAbove(value_smi, limit), &if_isoutofbounds, &if_isinbounds);
BIND(&if_isinbounds);
{
@@ -1980,7 +1923,7 @@ TNode<Smi> StringBuiltinsAssembler::ToSmiBetweenZeroAnd(
{
TNode<Smi> const zero = SmiConstant(0);
var_result =
- SelectTaggedConstant(SmiLessThan(value_int, zero), zero, limit);
+ SelectTaggedConstant(SmiLessThan(value_smi, zero), zero, limit);
Goto(&out);
}
}
@@ -1999,16 +1942,15 @@ TNode<Smi> StringBuiltinsAssembler::ToSmiBetweenZeroAnd(
}
BIND(&out);
- return var_result;
+ return var_result.value();
}
-TF_BUILTIN(SubString, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* string = Parameter(Descriptor::kString);
- Node* from = Parameter(Descriptor::kFrom);
- Node* to = Parameter(Descriptor::kTo);
+TF_BUILTIN(StringSubstring, CodeStubAssembler) {
+ TNode<String> string = CAST(Parameter(Descriptor::kString));
+ TNode<IntPtrT> from = UncheckedCast<IntPtrT>(Parameter(Descriptor::kFrom));
+ TNode<IntPtrT> to = UncheckedCast<IntPtrT>(Parameter(Descriptor::kTo));
- Return(SubString(context, string, from, to));
+ Return(SubString(string, from, to));
}
// ES6 #sec-string.prototype.substring
@@ -2031,7 +1973,7 @@ TF_BUILTIN(StringPrototypeSubstring, StringBuiltinsAssembler) {
VARIABLE(var_end, MachineRepresentation::kTagged);
// Check that {receiver} is coercible to Object and convert it to a String.
- Node* const string =
+ TNode<String> const string =
ToThisString(context, receiver, "String.prototype.substring");
Node* const length = LoadStringLengthAsSmi(string);
@@ -2061,9 +2003,8 @@ TF_BUILTIN(StringPrototypeSubstring, StringBuiltinsAssembler) {
BIND(&out);
{
- Node* result =
- SubString(context, string, var_start.value(), var_end.value());
- args.PopAndReturn(result);
+ args.PopAndReturn(SubString(string, SmiUntag(var_start.value()),
+ SmiUntag(var_end.value())));
}
}
@@ -2072,14 +2013,14 @@ TF_BUILTIN(StringPrototypeTrim, StringTrimAssembler) {
Generate(String::kTrim, "String.prototype.trim");
}
-// Non-standard WebKit extension
-TF_BUILTIN(StringPrototypeTrimLeft, StringTrimAssembler) {
- Generate(String::kTrimLeft, "String.prototype.trimLeft");
+// https://github.com/tc39/proposal-string-left-right-trim
+TF_BUILTIN(StringPrototypeTrimStart, StringTrimAssembler) {
+ Generate(String::kTrimStart, "String.prototype.trimLeft");
}
-// Non-standard WebKit extension
-TF_BUILTIN(StringPrototypeTrimRight, StringTrimAssembler) {
- Generate(String::kTrimRight, "String.prototype.trimRight");
+// https://github.com/tc39/proposal-string-left-right-trim
+TF_BUILTIN(StringPrototypeTrimEnd, StringTrimAssembler) {
+ Generate(String::kTrimEnd, "String.prototype.trimRight");
}
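The trim builtins above now dispatch through the trimStart/trimEnd modes; Generate() below scans from each requested end for the first character that is neither whitespace nor a line terminator. A rough standalone C++ sketch of that two-sided scan (ASCII isspace only; V8's actual whitespace set also covers Unicode spaces and line terminators):

#include <cctype>
#include <string>

std::string TrimSketch(const std::string& s, bool trim_start, bool trim_end) {
  size_t start = 0, end = s.size();
  if (trim_start)
    while (start < end && std::isspace(static_cast<unsigned char>(s[start]))) ++start;
  if (trim_end)
    while (end > start && std::isspace(static_cast<unsigned char>(s[end - 1]))) --end;
  return s.substr(start, end - start);  // [start, end), mirroring var_start / var_end + 1
}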
void StringTrimAssembler::Generate(String::TrimMode mode,
@@ -2092,7 +2033,7 @@ void StringTrimAssembler::Generate(String::TrimMode mode,
Node* const receiver = arguments.GetReceiver();
// Check that {receiver} is coercible to Object and convert it to a String.
- Node* const string = ToThisString(context, receiver, method_name);
+ TNode<String> const string = ToThisString(context, receiver, method_name);
TNode<IntPtrT> const string_length = LoadStringLengthAsWord(string);
ToDirectStringAssembler to_direct(state(), string);
@@ -2105,20 +2046,20 @@ void StringTrimAssembler::Generate(String::TrimMode mode,
TVARIABLE(IntPtrT, var_start, IntPtrConstant(0));
TVARIABLE(IntPtrT, var_end, IntPtrSub(string_length, IntPtrConstant(1)));
- if (mode == String::kTrimLeft || mode == String::kTrim) {
+ if (mode == String::kTrimStart || mode == String::kTrim) {
ScanForNonWhiteSpaceOrLineTerminator(string_data, string_data_offset,
is_stringonebyte, &var_start,
string_length, 1, &return_emptystring);
}
- if (mode == String::kTrimRight || mode == String::kTrim) {
+ if (mode == String::kTrimEnd || mode == String::kTrim) {
ScanForNonWhiteSpaceOrLineTerminator(
string_data, string_data_offset, is_stringonebyte, &var_end,
IntPtrConstant(-1), -1, &return_emptystring);
}
- arguments.PopAndReturn(SubString(context, string, SmiTag(var_start),
- SmiAdd(SmiTag(var_end), SmiConstant(1)),
- SubStringFlags::FROM_TO_ARE_BOUNDED));
+ arguments.PopAndReturn(
+ SubString(string, var_start.value(),
+ IntPtrAdd(var_end.value(), IntPtrConstant(1))));
BIND(&if_runtime);
arguments.PopAndReturn(
@@ -2281,21 +2222,21 @@ TNode<Int32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
var_result = StringCharCodeAt(string, index);
var_trail = Int32Constant(0);
- GotoIf(Word32NotEqual(Word32And(var_result, Int32Constant(0xFC00)),
+ GotoIf(Word32NotEqual(Word32And(var_result.value(), Int32Constant(0xFC00)),
Int32Constant(0xD800)),
&return_result);
TNode<IntPtrT> next_index = IntPtrAdd(index, IntPtrConstant(1));
GotoIfNot(IntPtrLessThan(next_index, length), &return_result);
var_trail = StringCharCodeAt(string, next_index);
- Branch(Word32Equal(Word32And(var_trail, Int32Constant(0xFC00)),
+ Branch(Word32Equal(Word32And(var_trail.value(), Int32Constant(0xFC00)),
Int32Constant(0xDC00)),
&handle_surrogate_pair, &return_result);
BIND(&handle_surrogate_pair);
{
- TNode<Int32T> lead = var_result;
- TNode<Int32T> trail = var_trail;
+ TNode<Int32T> lead = var_result.value();
+ TNode<Int32T> trail = var_trail.value();
// Check that this path is only taken if a surrogate pair is found
CSA_SLOW_ASSERT(this,
@@ -2332,7 +2273,7 @@ TNode<Int32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
}
BIND(&return_result);
- return var_result;
+ return var_result.value();
}
// ES6 #sec-%stringiteratorprototype%.next
@@ -2383,9 +2324,8 @@ TF_BUILTIN(StringIteratorPrototypeNext, StringBuiltinsAssembler) {
BIND(&throw_bad_receiver);
{
// The {receiver} is not a valid JSGeneratorObject.
- CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
- StringConstant("String Iterator.prototype.next"), iterator);
- Unreachable();
+ ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
+ StringConstant("String Iterator.prototype.next"), iterator);
}
}
diff --git a/deps/v8/src/builtins/builtins-typedarray-gen.cc b/deps/v8/src/builtins/builtins-typedarray-gen.cc
index b830a8597d..2a4f23b003 100644
--- a/deps/v8/src/builtins/builtins-typedarray-gen.cc
+++ b/deps/v8/src/builtins/builtins-typedarray-gen.cc
@@ -2,9 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-typedarray-gen.h"
+
+#include "src/builtins/builtins-constructor-gen.h"
+#include "src/builtins/builtins-iterator-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/code-stub-assembler.h"
+#include "src/builtins/growable-fixed-array-gen.h"
#include "src/handles-inl.h"
namespace v8 {
@@ -23,106 +27,22 @@ using TNode = compiler::TNode<T>;
// -----------------------------------------------------------------------------
// ES6 section 22.2 TypedArray Objects
-class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
- public:
- explicit TypedArrayBuiltinsAssembler(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state) {}
-
- protected:
- void GenerateTypedArrayPrototypeGetter(Node* context, Node* receiver,
- const char* method_name,
- int object_offset);
- void GenerateTypedArrayPrototypeIterationMethod(Node* context, Node* receiver,
- const char* method_name,
- IterationKind iteration_kind);
-
- void SetupTypedArray(TNode<JSTypedArray> holder, TNode<Smi> length,
- TNode<Number> byte_offset, TNode<Number> byte_length);
- void AttachBuffer(TNode<JSTypedArray> holder, TNode<JSArrayBuffer> buffer,
- TNode<Map> map, TNode<Smi> length,
- TNode<Number> byte_offset);
-
- TNode<Map> LoadMapForType(TNode<JSTypedArray> array);
- TNode<UintPtrT> CalculateExternalPointer(TNode<UintPtrT> backing_store,
- TNode<Number> byte_offset);
- Node* LoadDataPtr(Node* typed_array);
- TNode<BoolT> ByteLengthIsValid(TNode<Number> byte_length);
-
- // Returns true if kind is either UINT8_ELEMENTS or UINT8_CLAMPED_ELEMENTS.
- TNode<Word32T> IsUint8ElementsKind(TNode<Word32T> kind);
-
- // Loads the element kind of TypedArray instance.
- TNode<Word32T> LoadElementsKind(TNode<Object> typed_array);
-
- // Returns the byte size of an element for a TypedArray elements kind.
- TNode<IntPtrT> GetTypedArrayElementSize(TNode<Word32T> elements_kind);
-
- // Fast path for setting a TypedArray (source) onto another TypedArray
- // (target) at an element offset.
- void SetTypedArraySource(TNode<Context> context, TNode<JSTypedArray> source,
- TNode<JSTypedArray> target, TNode<IntPtrT> offset,
- Label* call_runtime, Label* if_source_too_large);
-
- void SetJSArraySource(TNode<Context> context, TNode<JSArray> source,
- TNode<JSTypedArray> target, TNode<IntPtrT> offset,
- Label* call_runtime, Label* if_source_too_large);
-
- void CallCMemmove(TNode<IntPtrT> dest_ptr, TNode<IntPtrT> src_ptr,
- TNode<IntPtrT> byte_length);
-
- void CallCCopyFastNumberJSArrayElementsToTypedArray(
- TNode<Context> context, TNode<JSArray> source, TNode<JSTypedArray> dest,
- TNode<IntPtrT> source_length, TNode<IntPtrT> offset);
-
- void CallCCopyTypedArrayElementsToTypedArray(TNode<JSTypedArray> source,
- TNode<JSTypedArray> dest,
- TNode<IntPtrT> source_length,
- TNode<IntPtrT> offset);
-};
-
TNode<Map> TypedArrayBuiltinsAssembler::LoadMapForType(
TNode<JSTypedArray> array) {
- Label unreachable(this), done(this);
- Label uint8_elements(this), uint8_clamped_elements(this), int8_elements(this),
- uint16_elements(this), int16_elements(this), uint32_elements(this),
- int32_elements(this), float32_elements(this), float64_elements(this);
- Label* elements_kind_labels[] = {
- &uint8_elements, &uint8_clamped_elements, &int8_elements,
- &uint16_elements, &int16_elements, &uint32_elements,
- &int32_elements, &float32_elements, &float64_elements};
- int32_t elements_kinds[] = {
- UINT8_ELEMENTS, UINT8_CLAMPED_ELEMENTS, INT8_ELEMENTS,
- UINT16_ELEMENTS, INT16_ELEMENTS, UINT32_ELEMENTS,
- INT32_ELEMENTS, FLOAT32_ELEMENTS, FLOAT64_ELEMENTS};
- const size_t kTypedElementsKindCount = LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
- FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND +
- 1;
- DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kinds));
- DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kind_labels));
-
TVARIABLE(Map, var_typed_map);
-
TNode<Map> array_map = LoadMap(array);
TNode<Int32T> elements_kind = LoadMapElementsKind(array_map);
- Switch(elements_kind, &unreachable, elements_kinds, elements_kind_labels,
- kTypedElementsKindCount);
- for (int i = 0; i < static_cast<int>(kTypedElementsKindCount); i++) {
- BIND(elements_kind_labels[i]);
- {
- ElementsKind kind = static_cast<ElementsKind>(elements_kinds[i]);
- ExternalArrayType type =
- isolate()->factory()->GetArrayTypeFromElementsKind(kind);
- Handle<Map> map(isolate()->heap()->MapForFixedTypedArray(type));
- var_typed_map = HeapConstant(map);
- Goto(&done);
- }
- }
+ DispatchTypedArrayByElementsKind(
+ elements_kind,
+ [&](ElementsKind kind, int size, int typed_array_fun_index) {
+ ExternalArrayType type =
+ isolate()->factory()->GetArrayTypeFromElementsKind(kind);
+ Handle<Map> map(isolate()->heap()->MapForFixedTypedArray(type));
+ var_typed_map = HeapConstant(map);
+ });
- BIND(&unreachable);
- { Unreachable(); }
- BIND(&done);
- return var_typed_map;
+ return var_typed_map.value();
}
// The byte_offset can be higher than Smi range, in which case to perform the
@@ -218,7 +138,7 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
Label setup_holder(this), allocate_on_heap(this), aligned(this),
allocate_elements(this), allocate_off_heap(this),
allocate_off_heap_no_init(this), attach_buffer(this), done(this);
- VARIABLE(var_total_size, MachineType::PointerRepresentation());
+ TVARIABLE(IntPtrT, var_total_size);
// SmiMul returns a heap number in case of Smi overflow.
TNode<Number> byte_length = SmiMul(length, element_size);
@@ -227,10 +147,12 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
TNode<Map> fixed_typed_map = LoadMapForType(holder);
GotoIf(TaggedIsNotSmi(byte_length), &allocate_off_heap);
- GotoIf(
- SmiGreaterThan(byte_length, SmiConstant(V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP)),
- &allocate_off_heap);
- TNode<IntPtrT> word_byte_length = SmiToWord(CAST(byte_length));
+ // The goto above ensures that byte_length is a Smi.
+ TNode<Smi> smi_byte_length = CAST(byte_length);
+ GotoIf(SmiGreaterThan(smi_byte_length,
+ SmiConstant(V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP)),
+ &allocate_off_heap);
+ TNode<IntPtrT> word_byte_length = SmiToIntPtr(smi_byte_length);
Goto(&allocate_on_heap);
BIND(&allocate_on_heap);
@@ -281,17 +203,18 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
// Fix alignment if needed.
DCHECK_EQ(0, FixedTypedArrayBase::kHeaderSize & kObjectAlignmentMask);
- Node* aligned_header_size =
+ TNode<IntPtrT> aligned_header_size =
IntPtrConstant(FixedTypedArrayBase::kHeaderSize + kObjectAlignmentMask);
- Node* size = IntPtrAdd(word_byte_length, aligned_header_size);
- var_total_size.Bind(WordAnd(size, IntPtrConstant(~kObjectAlignmentMask)));
+ TNode<IntPtrT> size = IntPtrAdd(word_byte_length, aligned_header_size);
+ var_total_size = WordAnd(size, IntPtrConstant(~kObjectAlignmentMask));
Goto(&allocate_elements);
}
BIND(&aligned);
{
- Node* header_size = IntPtrConstant(FixedTypedArrayBase::kHeaderSize);
- var_total_size.Bind(IntPtrAdd(word_byte_length, header_size));
+ TNode<IntPtrT> header_size =
+ IntPtrConstant(FixedTypedArrayBase::kHeaderSize);
+ var_total_size = IntPtrAdd(word_byte_length, header_size);
Goto(&allocate_elements);
}
@@ -359,7 +282,8 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
BIND(&attach_buffer);
{
- AttachBuffer(holder, var_buffer, fixed_typed_map, length, byte_offset);
+ AttachBuffer(holder, var_buffer.value(), fixed_typed_map, length,
+ byte_offset);
Goto(&done);
}
@@ -368,49 +292,44 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
}
// ES6 #sec-typedarray-length
-TF_BUILTIN(TypedArrayConstructByLength, TypedArrayBuiltinsAssembler) {
- Node* holder = Parameter(Descriptor::kHolder);
- TNode<Object> maybe_length = CAST(Parameter(Descriptor::kLength));
- TNode<Object> element_size = CAST(Parameter(Descriptor::kElementSize));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- CSA_ASSERT(this, IsJSTypedArray(holder));
+void TypedArrayBuiltinsAssembler::ConstructByLength(TNode<Context> context,
+ TNode<JSTypedArray> holder,
+ TNode<Object> length,
+ TNode<Smi> element_size) {
CSA_ASSERT(this, TaggedIsPositiveSmi(element_size));
- Label invalid_length(this);
+ Label invalid_length(this, Label::kDeferred), done(this);
- TNode<Number> length = ToInteger_Inline(
- context, maybe_length, CodeStubAssembler::kTruncateMinusZero);
+ TNode<Number> converted_length =
+ ToInteger_Inline(context, length, CodeStubAssembler::kTruncateMinusZero);
// The maximum length of a TypedArray is MaxSmi().
// Note: this is not per spec, but rather a constraint of our current
- // representation (which uses smi's).
- GotoIf(TaggedIsNotSmi(length), &invalid_length);
- GotoIf(SmiLessThan(length, SmiConstant(0)), &invalid_length);
-
- CallBuiltin(Builtins::kTypedArrayInitialize, context, holder, length,
- element_size, TrueConstant());
- Return(UndefinedConstant());
+ // representation (which uses Smis).
+ GotoIf(TaggedIsNotSmi(converted_length), &invalid_length);
+ // The goto above ensures that converted_length is a Smi.
+ TNode<Smi> smi_converted_length = CAST(converted_length);
+ GotoIf(SmiLessThan(smi_converted_length, SmiConstant(0)), &invalid_length);
+
+ Node* initialize = TrueConstant();
+ CallBuiltin(Builtins::kTypedArrayInitialize, context, holder,
+ converted_length, element_size, initialize);
+ Goto(&done);
BIND(&invalid_length);
{
- CallRuntime(Runtime::kThrowRangeError, context,
- SmiConstant(MessageTemplate::kInvalidTypedArrayLength), length);
- Unreachable();
+ ThrowRangeError(context, MessageTemplate::kInvalidTypedArrayLength,
+ converted_length);
}
+
+ BIND(&done);
}
// ES6 #sec-typedarray-buffer-byteoffset-length
-TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
- Node* holder = Parameter(Descriptor::kHolder);
- Node* buffer = Parameter(Descriptor::kBuffer);
- TNode<Object> byte_offset = CAST(Parameter(Descriptor::kByteOffset));
- Node* length = Parameter(Descriptor::kLength);
- Node* element_size = Parameter(Descriptor::kElementSize);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- CSA_ASSERT(this, IsJSTypedArray(holder));
- CSA_ASSERT(this, IsJSArrayBuffer(buffer));
+void TypedArrayBuiltinsAssembler::ConstructByArrayBuffer(
+ TNode<Context> context, TNode<JSTypedArray> holder,
+ TNode<JSArrayBuffer> buffer, TNode<Object> byte_offset,
+ TNode<Object> length, TNode<Smi> element_size) {
CSA_ASSERT(this, TaggedIsPositiveSmi(element_size));
VARIABLE(new_byte_length, MachineRepresentation::kTagged, SmiConstant(0));
@@ -421,7 +340,8 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
invalid_offset_error(this, Label::kDeferred);
Label offset_is_smi(this), offset_not_smi(this, Label::kDeferred),
check_length(this), call_init(this), invalid_length(this),
- length_undefined(this), length_defined(this), detached_error(this);
+ length_undefined(this), length_defined(this), detached_error(this),
+ done(this);
GotoIf(IsUndefined(byte_offset), &check_length);
@@ -477,7 +397,7 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
BIND(&length_defined);
{
- Node* new_length = ToSmiIndex(length, context, &invalid_length);
+ TNode<Smi> new_length = ToSmiIndex(length, context, &invalid_length);
GotoIf(IsDetachedBuffer(buffer), &detached_error);
new_byte_length.Bind(SmiMul(new_length, element_size));
// Reading the byte length must come after the ToIndex operation, which
@@ -495,22 +415,18 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
BIND(&call_init);
{
- Node* new_length = CallBuiltin(Builtins::kDivide, context,
- new_byte_length.value(), element_size);
+ TNode<Object> raw_length = CallBuiltin(
+ Builtins::kDivide, context, new_byte_length.value(), element_size);
// Force the result into a Smi, or throw a range error if it doesn't fit.
- new_length = ToSmiIndex(new_length, context, &invalid_length);
+ TNode<Smi> new_length = ToSmiIndex(raw_length, context, &invalid_length);
CallBuiltin(Builtins::kTypedArrayInitializeWithBuffer, context, holder,
new_length, buffer, element_size, offset.value());
- Return(UndefinedConstant());
+ Goto(&done);
}
BIND(&invalid_offset_error);
- {
- CallRuntime(Runtime::kThrowRangeError, context,
- SmiConstant(MessageTemplate::kInvalidOffset), byte_offset);
- Unreachable();
- }
+ { ThrowRangeError(context, MessageTemplate::kInvalidOffset, byte_offset); }
BIND(&start_offset_error);
{
@@ -534,24 +450,84 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
BIND(&invalid_length);
{
- CallRuntime(Runtime::kThrowRangeError, context,
- SmiConstant(MessageTemplate::kInvalidTypedArrayLength), length);
- Unreachable();
+ ThrowRangeError(context, MessageTemplate::kInvalidTypedArrayLength, length);
}
BIND(&detached_error);
{ ThrowTypeError(context, MessageTemplate::kDetachedOperation, "Construct"); }
+
+ BIND(&done);
+}
+
+void TypedArrayBuiltinsAssembler::ConstructByTypedArray(
+ TNode<Context> context, TNode<JSTypedArray> holder,
+ TNode<JSTypedArray> typed_array, TNode<Smi> element_size) {
+ CSA_ASSERT(this, TaggedIsPositiveSmi(element_size));
+
+ TNode<JSFunction> const default_constructor = CAST(LoadContextElement(
+ LoadNativeContext(context), Context::ARRAY_BUFFER_FUN_INDEX));
+
+ Label construct(this), if_detached(this), if_notdetached(this),
+ check_for_sab(this), if_buffernotshared(this), check_prototype(this),
+ done(this);
+ TVARIABLE(JSReceiver, buffer_constructor, default_constructor);
+
+ TNode<JSArrayBuffer> source_buffer = LoadObjectField<JSArrayBuffer>(
+ typed_array, JSArrayBufferView::kBufferOffset);
+ Branch(IsDetachedBuffer(source_buffer), &if_detached, &if_notdetached);
+
+ // TODO(petermarshall): Throw on detached typedArray.
+ TVARIABLE(Smi, source_length);
+ BIND(&if_detached);
+ source_length = SmiConstant(0);
+ Goto(&check_for_sab);
+
+ BIND(&if_notdetached);
+ source_length =
+ CAST(LoadObjectField(typed_array, JSTypedArray::kLengthOffset));
+ Goto(&check_for_sab);
+
+ // The spec requires that constructing a typed array using a SAB-backed typed
+ // array use the ArrayBuffer constructor, not the species constructor. See
+ // https://tc39.github.io/ecma262/#sec-typedarray-typedarray.
+ BIND(&check_for_sab);
+ TNode<Uint32T> bitfield =
+ LoadObjectField<Uint32T>(source_buffer, JSArrayBuffer::kBitFieldOffset);
+ Branch(IsSetWord32<JSArrayBuffer::IsShared>(bitfield), &construct,
+ &if_buffernotshared);
+
+ BIND(&if_buffernotshared);
+ {
+ buffer_constructor =
+ CAST(SpeciesConstructor(context, source_buffer, default_constructor));
+ // TODO(petermarshall): Throw on detached typedArray.
+ GotoIfNot(IsDetachedBuffer(source_buffer), &construct);
+ source_length = SmiConstant(0);
+ Goto(&construct);
+ }
+
+ BIND(&construct);
+ {
+ ConstructByArrayLike(context, holder, typed_array, source_length.value(),
+ element_size);
+ TNode<Object> proto = GetProperty(context, buffer_constructor.value(),
+ PrototypeStringConstant());
+ // TODO(petermarshall): Correct for realm as per 9.1.14 step 4.
+ TNode<JSArrayBuffer> buffer = LoadObjectField<JSArrayBuffer>(
+ holder, JSArrayBufferView::kBufferOffset);
+ CallRuntime(Runtime::kInternalSetPrototype, context, buffer, proto);
+
+ Goto(&done);
+ }
+
+ BIND(&done);
}
Node* TypedArrayBuiltinsAssembler::LoadDataPtr(Node* typed_array) {
CSA_ASSERT(this, IsJSTypedArray(typed_array));
Node* elements = LoadElements(typed_array);
CSA_ASSERT(this, IsFixedTypedArray(elements));
- Node* base_pointer = BitcastTaggedToWord(
- LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset));
- Node* external_pointer = BitcastTaggedToWord(
- LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset));
- return IntPtrAdd(base_pointer, external_pointer);
+ return LoadFixedTypedArrayBackingStore(CAST(elements));
}
TNode<BoolT> TypedArrayBuiltinsAssembler::ByteLengthIsValid(
@@ -574,28 +550,24 @@ TNode<BoolT> TypedArrayBuiltinsAssembler::ByteLengthIsValid(
Goto(&done);
BIND(&done);
- return is_valid;
+ return is_valid.value();
}
-TF_BUILTIN(TypedArrayConstructByArrayLike, TypedArrayBuiltinsAssembler) {
- Node* holder = Parameter(Descriptor::kHolder);
- Node* array_like = Parameter(Descriptor::kArrayLike);
- Node* initial_length = Parameter(Descriptor::kLength);
- Node* element_size = Parameter(Descriptor::kElementSize);
- CSA_ASSERT(this, TaggedIsSmi(element_size));
- Node* context = Parameter(Descriptor::kContext);
-
+void TypedArrayBuiltinsAssembler::ConstructByArrayLike(
+ TNode<Context> context, TNode<JSTypedArray> holder,
+ TNode<HeapObject> array_like, TNode<Object> initial_length,
+ TNode<Smi> element_size) {
Node* initialize = FalseConstant();
- Label invalid_length(this), fill(this), fast_copy(this);
+ Label invalid_length(this), fill(this), fast_copy(this), done(this);
// The caller has looked up length on array_like, which is observable.
- Node* length = ToSmiLength(initial_length, context, &invalid_length);
+ TNode<Smi> length = ToSmiLength(initial_length, context, &invalid_length);
CallBuiltin(Builtins::kTypedArrayInitialize, context, holder, length,
element_size, initialize);
GotoIf(SmiNotEqual(length, SmiConstant(0)), &fill);
- Return(UndefinedConstant());
+ Goto(&done);
BIND(&fill);
TNode<Int32T> holder_kind = LoadMapElementsKind(LoadMap(holder));
@@ -605,7 +577,7 @@ TF_BUILTIN(TypedArrayConstructByArrayLike, TypedArrayBuiltinsAssembler) {
// Copy using the elements accessor.
CallRuntime(Runtime::kTypedArrayCopyElements, context, holder, array_like,
length);
- Return(UndefinedConstant());
+ Goto(&done);
BIND(&fast_copy);
{
@@ -632,16 +604,117 @@ TF_BUILTIN(TypedArrayConstructByArrayLike, TypedArrayBuiltinsAssembler) {
CallCFunction3(MachineType::AnyTagged(), MachineType::Pointer(),
MachineType::Pointer(), MachineType::UintPtr(), memcpy,
holder_data_ptr, source_data_ptr, byte_length_intptr);
- Return(UndefinedConstant());
+ Goto(&done);
}
BIND(&invalid_length);
{
- CallRuntime(Runtime::kThrowRangeError, context,
- SmiConstant(MessageTemplate::kInvalidTypedArrayLength),
- initial_length);
- Unreachable();
+ ThrowRangeError(context, MessageTemplate::kInvalidTypedArrayLength,
+ initial_length);
+ }
+
+ BIND(&done);
+}
+
+void TypedArrayBuiltinsAssembler::ConstructByIterable(
+ TNode<Context> context, TNode<JSTypedArray> holder,
+ TNode<JSReceiver> iterable, TNode<Object> iterator_fn,
+ TNode<Smi> element_size) {
+ CSA_ASSERT(this, IsCallable(iterator_fn));
+ Label fast_path(this), slow_path(this), done(this);
+
+ TNode<JSArray> array_like = CAST(
+ CallBuiltin(Builtins::kIterableToList, context, iterable, iterator_fn));
+ TNode<Object> initial_length = LoadJSArrayLength(array_like);
+ ConstructByArrayLike(context, holder, array_like, initial_length,
+ element_size);
+}
+
+TF_BUILTIN(TypedArrayConstructor, TypedArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+
+ // If NewTarget is undefined, throw a TypeError exception.
+ Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
+ MachineType::TaggedPointer());
+ Node* shared = LoadObjectField(target, JSFunction::kSharedFunctionInfoOffset);
+ Node* name = LoadObjectField(shared, SharedFunctionInfo::kNameOffset);
+ ThrowTypeError(context, MessageTemplate::kConstructorNotFunction, name);
+}
+
+TF_BUILTIN(TypedArrayConstructor_ConstructStub, TypedArrayBuiltinsAssembler) {
+ Label if_arg1isbuffer(this), if_arg1istypedarray(this),
+ if_arg1isreceiver(this), if_arg1isnumber(this), done(this);
+
+ TNode<Object> new_target = CAST(Parameter(BuiltinDescriptor::kNewTarget));
+ CSA_ASSERT(this, IsNotUndefined(new_target));
+
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ TNode<Object> arg1 = args.GetOptionalArgumentValue(0);
+ TNode<Object> arg2 = args.GetOptionalArgumentValue(1);
+ TNode<Object> arg3 = args.GetOptionalArgumentValue(2);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+
+ Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
+ MachineType::TaggedPointer());
+ ConstructorBuiltinsAssembler constructor_assembler(this->state());
+ TNode<JSTypedArray> holder = CAST(
+ constructor_assembler.EmitFastNewObject(context, target, new_target));
+
+ TNode<Smi> element_size =
+ SmiTag(GetTypedArrayElementSize(LoadElementsKind(holder)));
+
+ GotoIf(TaggedIsSmi(arg1), &if_arg1isnumber);
+ GotoIf(IsJSArrayBuffer(arg1), &if_arg1isbuffer);
+ GotoIf(IsJSTypedArray(arg1), &if_arg1istypedarray);
+ GotoIf(IsJSReceiver(arg1), &if_arg1isreceiver);
+ Goto(&if_arg1isnumber);
+
+ BIND(&if_arg1isbuffer);
+ ConstructByArrayBuffer(context, holder, CAST(arg1), arg2, arg3, element_size);
+ Goto(&done);
+
+ BIND(&if_arg1istypedarray);
+ TNode<JSTypedArray> typed_array = CAST(arg1);
+ ConstructByTypedArray(context, holder, typed_array, element_size);
+ Goto(&done);
+
+ BIND(&if_arg1isreceiver);
+ {
+ Label if_iteratorundefined(this), if_iteratornotcallable(this);
+ // Get iterator symbol
+ TNode<Object> iteratorFn =
+ CAST(GetMethod(context, arg1, isolate()->factory()->iterator_symbol(),
+ &if_iteratorundefined));
+ GotoIf(TaggedIsSmi(iteratorFn), &if_iteratornotcallable);
+ GotoIfNot(IsCallable(iteratorFn), &if_iteratornotcallable);
+
+ ConstructByIterable(context, holder, CAST(arg1), iteratorFn, element_size);
+ Goto(&done);
+
+ BIND(&if_iteratorundefined);
+ {
+ TNode<HeapObject> array_like = CAST(arg1);
+ TNode<Object> initial_length =
+ GetProperty(context, arg1, LengthStringConstant());
+
+ ConstructByArrayLike(context, holder, array_like, initial_length,
+ element_size);
+ Goto(&done);
+ }
+
+ BIND(&if_iteratornotcallable);
+ { ThrowTypeError(context, MessageTemplate::kIteratorSymbolNonCallable); }
}
+
+ // First arg was a number or fell through and will be treated as a number.
+ BIND(&if_arg1isnumber);
+ ConstructByLength(context, holder, arg1, element_size);
+ Goto(&done);
+
+ BIND(&done);
+ args.PopAndReturn(holder);
}
void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeGetter(
@@ -668,9 +741,8 @@ void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeGetter(
BIND(&receiver_is_incompatible);
{
// The {receiver} is not a valid JSTypedArray.
- CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
- StringConstant(method_name), receiver);
- Unreachable();
+ ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
+ StringConstant(method_name), receiver);
}
}
@@ -707,57 +779,156 @@ TNode<Word32T> TypedArrayBuiltinsAssembler::IsUint8ElementsKind(
Word32Equal(kind, Int32Constant(UINT8_CLAMPED_ELEMENTS)));
}
+TNode<Word32T> TypedArrayBuiltinsAssembler::IsBigInt64ElementsKind(
+ TNode<Word32T> kind) {
+ return Word32Or(Word32Equal(kind, Int32Constant(BIGINT64_ELEMENTS)),
+ Word32Equal(kind, Int32Constant(BIGUINT64_ELEMENTS)));
+}
+
TNode<Word32T> TypedArrayBuiltinsAssembler::LoadElementsKind(
- TNode<Object> typed_array) {
- CSA_ASSERT(this, IsJSTypedArray(typed_array));
- return LoadMapElementsKind(LoadMap(CAST(typed_array)));
+ TNode<JSTypedArray> typed_array) {
+ return LoadMapElementsKind(LoadMap(typed_array));
}
TNode<IntPtrT> TypedArrayBuiltinsAssembler::GetTypedArrayElementSize(
TNode<Word32T> elements_kind) {
TVARIABLE(IntPtrT, element_size);
- Label next(this), if_unknown_type(this, Label::kDeferred);
- size_t const kTypedElementsKindCount = LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
- FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND +
- 1;
+ DispatchTypedArrayByElementsKind(
+ elements_kind,
+ [&](ElementsKind el_kind, int size, int typed_array_fun_index) {
+ element_size = IntPtrConstant(size);
+ });
- int32_t elements_kinds[kTypedElementsKindCount] = {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) TYPE##_ELEMENTS,
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- };
+ return element_size.value();
+}
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- Label if_##type##array(this);
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
+TNode<Object> TypedArrayBuiltinsAssembler::GetDefaultConstructor(
+ TNode<Context> context, TNode<JSTypedArray> exemplar) {
+ TVARIABLE(IntPtrT, context_slot);
+ TNode<Word32T> elements_kind = LoadElementsKind(exemplar);
- Label* elements_kind_labels[kTypedElementsKindCount] = {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) &if_##type##array,
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- };
+ DispatchTypedArrayByElementsKind(
+ elements_kind,
+ [&](ElementsKind el_kind, int size, int typed_array_function_index) {
+ context_slot = IntPtrConstant(typed_array_function_index);
+ });
- Switch(elements_kind, &if_unknown_type, elements_kinds, elements_kind_labels,
- kTypedElementsKindCount);
+ return LoadContextElement(LoadNativeContext(context), context_slot.value());
+}
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- BIND(&if_##type##array); \
- { \
- element_size = IntPtrConstant(size); \
- Goto(&next); \
- }
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
+TNode<Object> TypedArrayBuiltinsAssembler::TypedArraySpeciesConstructor(
+ TNode<Context> context, TNode<JSTypedArray> exemplar) {
+ TVARIABLE(Object, var_constructor);
+ Label slow(this), done(this);
- BIND(&if_unknown_type);
+ // Let defaultConstructor be the intrinsic object listed in column one of
+ // Table 52 for exemplar.[[TypedArrayName]].
+ TNode<Object> default_constructor = GetDefaultConstructor(context, exemplar);
+
+ var_constructor = default_constructor;
+ Node* map = LoadMap(exemplar);
+ GotoIfNot(IsPrototypeTypedArrayPrototype(context, map), &slow);
+ Branch(IsSpeciesProtectorCellInvalid(), &slow, &done);
+
+ BIND(&slow);
+ var_constructor =
+ CAST(SpeciesConstructor(context, exemplar, default_constructor));
+ Goto(&done);
+
+ BIND(&done);
+ return var_constructor.value();
+}
+
+TNode<JSTypedArray> TypedArrayBuiltinsAssembler::SpeciesCreateByArrayBuffer(
+ TNode<Context> context, TNode<JSTypedArray> exemplar,
+ TNode<JSArrayBuffer> buffer, TNode<Number> byte_offset, TNode<Smi> len,
+ const char* method_name) {
+ // Let constructor be ? SpeciesConstructor(exemplar, defaultConstructor).
+ TNode<Object> constructor = TypedArraySpeciesConstructor(context, exemplar);
+
+ // Let newTypedArray be ? Construct(constructor, argumentList).
+ TNode<Object> new_object =
+ CAST(ConstructJS(CodeFactory::Construct(isolate()), context, constructor,
+ buffer, byte_offset, len));
+
+ // Perform ? ValidateTypedArray(newTypedArray).
+ return ValidateTypedArray(context, new_object, method_name);
+}
+
+TNode<JSTypedArray> TypedArrayBuiltinsAssembler::SpeciesCreateByLength(
+ TNode<Context> context, TNode<JSTypedArray> exemplar, TNode<Smi> len,
+ const char* method_name) {
+ CSA_ASSERT(this, TaggedIsPositiveSmi(len));
+
+ // Let constructor be ? SpeciesConstructor(exemplar, defaultConstructor).
+ TNode<Object> constructor = TypedArraySpeciesConstructor(context, exemplar);
+ CSA_ASSERT(this, IsJSFunction(constructor));
+
+ return CreateByLength(context, constructor, len, method_name);
+}
+
+TNode<JSTypedArray> TypedArrayBuiltinsAssembler::CreateByLength(
+ TNode<Context> context, TNode<Object> constructor, TNode<Smi> len,
+ const char* method_name) {
+ // Let newTypedArray be ? Construct(constructor, argumentList).
+ TNode<Object> new_object = CAST(ConstructJS(CodeFactory::Construct(isolate()),
+ context, constructor, len));
+
+ // Perform ? ValidateTypedArray(newTypedArray).
+ TNode<JSTypedArray> new_typed_array =
+ ValidateTypedArray(context, new_object, method_name);
+
+ // If newTypedArray.[[ArrayLength]] < argumentList[0], throw a TypeError
+ // exception.
+ Label if_length_is_not_short(this);
+ TNode<Smi> new_length =
+ LoadObjectField<Smi>(new_typed_array, JSTypedArray::kLengthOffset);
+ GotoIfNot(SmiLessThan(new_length, len), &if_length_is_not_short);
+ ThrowTypeError(context, MessageTemplate::kTypedArrayTooShort);
+
+ BIND(&if_length_is_not_short);
+ return new_typed_array;
+}
+
+TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::GetBuffer(
+ TNode<Context> context, TNode<JSTypedArray> array) {
+ Label call_runtime(this), done(this);
+ TVARIABLE(Object, var_result);
+
+ TNode<Object> buffer = LoadObjectField(array, JSTypedArray::kBufferOffset);
+ GotoIf(IsDetachedBuffer(buffer), &call_runtime);
+ TNode<UintPtrT> backing_store = LoadObjectField<UintPtrT>(
+ CAST(buffer), JSArrayBuffer::kBackingStoreOffset);
+ GotoIf(WordEqual(backing_store, IntPtrConstant(0)), &call_runtime);
+ var_result = buffer;
+ Goto(&done);
+
+ BIND(&call_runtime);
{
- element_size = IntPtrConstant(0);
- Goto(&next);
+ var_result = CallRuntime(Runtime::kTypedArrayGetBuffer, context, array);
+ Goto(&done);
}
- BIND(&next);
- return element_size;
+
+ BIND(&done);
+ return CAST(var_result.value());
+}
+
+TNode<JSTypedArray> TypedArrayBuiltinsAssembler::ValidateTypedArray(
+ TNode<Context> context, TNode<Object> obj, const char* method_name) {
+ Label validation_done(this);
+
+ // If it is not a typed array, throw
+ ThrowIfNotInstanceType(context, obj, JS_TYPED_ARRAY_TYPE, method_name);
+
+ // If the typed array's buffer is detached, throw
+ TNode<Object> buffer =
+ LoadObjectField(CAST(obj), JSTypedArray::kBufferOffset);
+ GotoIfNot(IsDetachedBuffer(buffer), &validation_done);
+ ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
+
+ BIND(&validation_done);
+ return CAST(obj);
}
void TypedArrayBuiltinsAssembler::SetTypedArraySource(
@@ -801,7 +972,7 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource(
CSA_ASSERT(this,
UintPtrGreaterThanOrEqual(source_byte_length, IntPtrConstant(0)));
- Label call_memmove(this), fast_c_call(this), out(this);
+ Label call_memmove(this), fast_c_call(this), out(this), exception(this);
// A fast memmove call can be used when the source and target types are
// the same or either Uint8 or Uint8Clamped.
@@ -823,6 +994,10 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource(
this, UintPtrGreaterThanOrEqual(
IntPtrMul(target_length, target_el_size), IntPtrConstant(0)));
+ GotoIf(Word32NotEqual(IsBigInt64ElementsKind(source_el_kind),
+ IsBigInt64ElementsKind(target_el_kind)),
+ &exception);
+
TNode<IntPtrT> source_length =
LoadAndUntagObjectField(source, JSTypedArray::kLengthOffset);
CallCCopyTypedArrayElementsToTypedArray(source, target, source_length,
@@ -830,6 +1005,9 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource(
Goto(&out);
}
+ BIND(&exception);
+ ThrowTypeError(context, MessageTemplate::kBigIntMixedTypes);
+
BIND(&out);
}
@@ -871,6 +1049,7 @@ void TypedArrayBuiltinsAssembler::SetJSArraySource(
}
BIND(&fast_c_call);
+ GotoIf(IsBigInt64ElementsKind(LoadElementsKind(target)), call_runtime);
CallCCopyFastNumberJSArrayElementsToTypedArray(context, source, target,
source_length, offset);
Goto(&out);
@@ -893,6 +1072,7 @@ void TypedArrayBuiltinsAssembler::
TNode<JSTypedArray> dest,
TNode<IntPtrT> source_length,
TNode<IntPtrT> offset) {
+ CSA_ASSERT(this, Word32Not(IsBigInt64ElementsKind(LoadElementsKind(dest))));
TNode<ExternalReference> f = ExternalConstant(
ExternalReference::copy_fast_number_jsarray_elements_to_typed_array(
isolate()));
@@ -913,6 +1093,56 @@ void TypedArrayBuiltinsAssembler::CallCCopyTypedArrayElementsToTypedArray(
offset);
}
+void TypedArrayBuiltinsAssembler::CallCCopyTypedArrayElementsSlice(
+ TNode<JSTypedArray> source, TNode<JSTypedArray> dest, TNode<IntPtrT> start,
+ TNode<IntPtrT> end) {
+ TNode<ExternalReference> f = ExternalConstant(
+ ExternalReference::copy_typed_array_elements_slice(isolate()));
+ CallCFunction4(MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::AnyTagged(), MachineType::UintPtr(),
+ MachineType::UintPtr(), f, source, dest, start, end);
+}
+
+void TypedArrayBuiltinsAssembler::DispatchTypedArrayByElementsKind(
+ TNode<Word32T> elements_kind, const TypedArraySwitchCase& case_function) {
+ Label next(this), if_unknown_type(this, Label::kDeferred);
+
+ int32_t elements_kinds[] = {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) TYPE##_ELEMENTS,
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ };
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ Label if_##type##array(this);
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ Label* elements_kind_labels[] = {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) &if_##type##array,
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ };
+ STATIC_ASSERT(arraysize(elements_kinds) == arraysize(elements_kind_labels));
+
+ Switch(elements_kind, &if_unknown_type, elements_kinds, elements_kind_labels,
+ arraysize(elements_kinds));
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ BIND(&if_##type##array); \
+ { \
+ case_function(TYPE##_ELEMENTS, size, Context::TYPE##_ARRAY_FUN_INDEX); \
+ Goto(&next); \
+ }
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ BIND(&if_unknown_type);
+ Unreachable();
+
+ BIND(&next);
+}
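DispatchTypedArrayByElementsKind centralizes the kind switch that LoadMapForType and GetTypedArrayElementSize previously open-coded: each caller now supplies a callback that receives the kind, its element size, and its native-context constructor slot. A minimal sketch of the pattern in plain C++ (enum values and sizes are illustrative, not V8's):

#include <functional>

enum KindSketch { kU8, kI8, kU16, kI16, kU32, kI32, kF32, kF64 };

void DispatchByKind(KindSketch kind,
                    const std::function<void(KindSketch, int)>& case_fn) {
  switch (kind) {
    case kU8:  case kI8:  case_fn(kind, 1); break;
    case kU16: case kI16: case_fn(kind, 2); break;
    case kU32: case kI32: case kF32: case_fn(kind, 4); break;
    case kF64: case_fn(kind, 8); break;
  }
}
// Usage: int size = 0; DispatchByKind(kF32, [&](KindSketch, int s) { size = s; });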
+
// ES #sec-get-%typedarray%.prototype.set
TF_BUILTIN(TypedArrayPrototypeSet, TypedArrayBuiltinsAssembler) {
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
@@ -998,6 +1228,193 @@ TF_BUILTIN(TypedArrayPrototypeSet, TypedArrayBuiltinsAssembler) {
ThrowTypeError(context, MessageTemplate::kNotTypedArray);
}
+// ES %TypedArray%.prototype.slice
+TF_BUILTIN(TypedArrayPrototypeSlice, TypedArrayBuiltinsAssembler) {
+ const char* method_name = "%TypedArray%.prototype.slice";
+ Label call_c(this), call_memmove(this), if_count_is_not_zero(this),
+ if_typed_array_is_neutered(this, Label::kDeferred),
+ if_bigint_mixed_types(this, Label::kDeferred);
+
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ CodeStubArguments args(
+ this, ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount)));
+
+ TNode<Object> receiver = args.GetReceiver();
+ TNode<JSTypedArray> source =
+ ValidateTypedArray(context, receiver, method_name);
+
+ TNode<Smi> source_length =
+ LoadObjectField<Smi>(source, JSTypedArray::kLengthOffset);
+
+ // Convert start offset argument to integer, and calculate relative offset.
+ TNode<Object> start = args.GetOptionalArgumentValue(0, SmiConstant(0));
+ TNode<Smi> start_index =
+ SmiTag(ConvertToRelativeIndex(context, start, SmiUntag(source_length)));
+
+ // Convert end offset argument to integer, and calculate relative offset.
+ // If the end offset is not given or is undefined, use source_length as
+ // the end index.
+ TNode<Object> end = args.GetOptionalArgumentValue(1, UndefinedConstant());
+ TNode<Smi> end_index =
+ Select<Smi>(IsUndefined(end), [=] { return source_length; },
+ [=] {
+ return SmiTag(ConvertToRelativeIndex(
+ context, end, SmiUntag(source_length)));
+ },
+ MachineRepresentation::kTagged);
+
+ // Create a result array by invoking TypedArraySpeciesCreate.
+ TNode<Smi> count = SmiMax(SmiSub(end_index, start_index), SmiConstant(0));
+ TNode<JSTypedArray> result_array =
+ SpeciesCreateByLength(context, source, count, method_name);
+
+ // If count is zero, return early.
+ GotoIf(SmiGreaterThan(count, SmiConstant(0)), &if_count_is_not_zero);
+ args.PopAndReturn(result_array);
+
+ BIND(&if_count_is_not_zero);
+ // Check whether the source array is neutered. We don't need to check whether
+ // the result array is neutered, since TypedArraySpeciesCreate checked it.
+ CSA_ASSERT(this, Word32BinaryNot(IsDetachedBuffer(LoadObjectField(
+ result_array, JSTypedArray::kBufferOffset))));
+ TNode<Object> receiver_buffer =
+ LoadObjectField(CAST(receiver), JSTypedArray::kBufferOffset);
+ GotoIf(IsDetachedBuffer(receiver_buffer), &if_typed_array_is_neutered);
+
+ // result_array could be a different type from source or share the same
+ // buffer with the source because of custom species constructor.
+ // If the types of source and result array are the same and they are not
+ // sharing the same buffer, use memmove.
+ TNode<Word32T> source_el_kind = LoadElementsKind(source);
+ TNode<Word32T> target_el_kind = LoadElementsKind(result_array);
+ GotoIfNot(Word32Equal(source_el_kind, target_el_kind), &call_c);
+
+ TNode<Object> target_buffer =
+ LoadObjectField(result_array, JSTypedArray::kBufferOffset);
+ Branch(WordEqual(receiver_buffer, target_buffer), &call_c, &call_memmove);
+
+ BIND(&call_memmove);
+ {
+ GotoIfForceSlowPath(&call_c);
+
+ TNode<IntPtrT> target_data_ptr =
+ UncheckedCast<IntPtrT>(LoadDataPtr(result_array));
+ TNode<IntPtrT> source_data_ptr =
+ UncheckedCast<IntPtrT>(LoadDataPtr(source));
+
+ TNode<IntPtrT> source_el_size = GetTypedArrayElementSize(source_el_kind);
+ TNode<IntPtrT> source_start_bytes =
+ IntPtrMul(SmiToIntPtr(start_index), source_el_size);
+ TNode<IntPtrT> source_start =
+ IntPtrAdd(source_data_ptr, source_start_bytes);
+
+ TNode<IntPtrT> count_bytes = IntPtrMul(SmiToIntPtr(count), source_el_size);
+
+#ifdef DEBUG
+ TNode<IntPtrT> target_byte_length =
+ LoadAndUntagObjectField(result_array, JSTypedArray::kByteLengthOffset);
+ CSA_ASSERT(this, IntPtrLessThanOrEqual(count_bytes, target_byte_length));
+
+ TNode<IntPtrT> source_byte_length =
+ LoadAndUntagObjectField(source, JSTypedArray::kByteLengthOffset);
+ TNode<IntPtrT> source_size_in_bytes =
+ IntPtrSub(source_byte_length, source_start_bytes);
+ CSA_ASSERT(this, IntPtrLessThanOrEqual(count_bytes, source_size_in_bytes));
+#endif // DEBUG
+
+ CallCMemmove(target_data_ptr, source_start, count_bytes);
+ args.PopAndReturn(result_array);
+ }
+
+ BIND(&call_c);
+ {
+ GotoIf(Word32NotEqual(IsBigInt64ElementsKind(source_el_kind),
+ IsBigInt64ElementsKind(target_el_kind)),
+ &if_bigint_mixed_types);
+
+ CallCCopyTypedArrayElementsSlice(
+ source, result_array, SmiToIntPtr(start_index), SmiToIntPtr(end_index));
+ args.PopAndReturn(result_array);
+ }
+
+ BIND(&if_typed_array_is_neutered);
+ ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
+
+ BIND(&if_bigint_mixed_types);
+ ThrowTypeError(context, MessageTemplate::kBigIntMixedTypes);
+}
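The slice builtin above picks between two copy strategies: a raw memmove is taken only when source and result share an elements kind but not a backing buffer; everything else goes through the element-wise C helper, which additionally rejects mixing BigInt and non-BigInt kinds. A sketch of just that decision (plain C++, not V8 code):

// Same kind and distinct buffers => a byte-for-byte copy is safe.
bool CanUseMemmove(int source_kind, int target_kind,
                   const void* source_buffer, const void* target_buffer) {
  return source_kind == target_kind && source_buffer != target_buffer;
}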
+
+// ES %TypedArray%.prototype.subarray
+TF_BUILTIN(TypedArrayPrototypeSubArray, TypedArrayBuiltinsAssembler) {
+ const char* method_name = "%TypedArray%.prototype.subarray";
+ Label offset_done(this);
+
+ TVARIABLE(Smi, var_begin);
+ TVARIABLE(Smi, var_end);
+
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ CodeStubArguments args(
+ this, ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount)));
+
+ // 1. Let O be the this value.
+ // 3. If O does not have a [[TypedArrayName]] internal slot, throw a TypeError
+ // exception.
+ TNode<Object> receiver = args.GetReceiver();
+ ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, method_name);
+
+ TNode<JSTypedArray> source = CAST(receiver);
+
+ // 5. Let buffer be O.[[ViewedArrayBuffer]].
+ TNode<JSArrayBuffer> buffer = GetBuffer(context, source);
+ // 6. Let srcLength be O.[[ArrayLength]].
+ TNode<Smi> source_length =
+ LoadObjectField<Smi>(source, JSTypedArray::kLengthOffset);
+
+ // 7. Let relativeBegin be ? ToInteger(begin).
+ // 8. If relativeBegin < 0, let beginIndex be max((srcLength + relativeBegin),
+ // 0); else let beginIndex be min(relativeBegin, srcLength).
+ TNode<Object> begin = args.GetOptionalArgumentValue(0, SmiConstant(0));
+ var_begin =
+ SmiTag(ConvertToRelativeIndex(context, begin, SmiUntag(source_length)));
+
+ TNode<Object> end = args.GetOptionalArgumentValue(1, UndefinedConstant());
+ // 9. If end is undefined, let relativeEnd be srcLength;
+ var_end = source_length;
+ GotoIf(IsUndefined(end), &offset_done);
+
+ // else, let relativeEnd be ? ToInteger(end).
+ // 10. If relativeEnd < 0, let endIndex be max((srcLength + relativeEnd), 0);
+ // else let endIndex be min(relativeEnd, srcLength).
+ var_end =
+ SmiTag(ConvertToRelativeIndex(context, end, SmiUntag(source_length)));
+ Goto(&offset_done);
+
+ BIND(&offset_done);
+
+ // 11. Let newLength be max(endIndex - beginIndex, 0).
+ TNode<Smi> new_length =
+ SmiMax(SmiSub(var_end.value(), var_begin.value()), SmiConstant(0));
+
+ // 12. Let constructorName be the String value of O.[[TypedArrayName]].
+ // 13. Let elementSize be the Number value of the Element Size value specified
+ // in Table 52 for constructorName.
+ TNode<Word32T> element_kind = LoadElementsKind(source);
+ TNode<IntPtrT> element_size = GetTypedArrayElementSize(element_kind);
+
+ // 14. Let srcByteOffset be O.[[ByteOffset]].
+ TNode<Number> source_byte_offset =
+ LoadObjectField<Number>(source, JSTypedArray::kByteOffsetOffset);
+
+ // 15. Let beginByteOffset be srcByteOffset + beginIndex × elementSize.
+ TNode<Number> offset = SmiMul(var_begin.value(), SmiFromIntPtr(element_size));
+ TNode<Number> begin_byte_offset = NumberAdd(source_byte_offset, offset);
+
+ // 16. Let argumentsList be « buffer, beginByteOffset, newLength ».
+ // 17. Return ? TypedArraySpeciesCreate(O, argumentsList).
+ args.PopAndReturn(SpeciesCreateByArrayBuffer(
+ context, source, buffer, begin_byte_offset, new_length, method_name));
+}
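The subarray builtin above only does index arithmetic before handing off to the species constructor. A small standalone C++ sketch of steps 11 and 15 (my own names, assuming begin/end have already been converted to relative indices):

#include <algorithm>
#include <cstdint>

struct SubarraySpan { int64_t begin_byte_offset; int64_t new_length; };

SubarraySpan ComputeSubarray(int64_t begin_index, int64_t end_index,
                             int64_t src_byte_offset, int64_t element_size) {
  SubarraySpan span;
  span.new_length = std::max<int64_t>(end_index - begin_index, 0);        // step 11
  span.begin_byte_offset = src_byte_offset + begin_index * element_size;  // step 15
  return span;
}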
+
// ES #sec-get-%typedarray%.prototype-@@tostringtag
TF_BUILTIN(TypedArrayPrototypeToStringTag, TypedArrayBuiltinsAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -1045,7 +1462,6 @@ void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeIterationMethod(
Node* context, Node* receiver, const char* method_name,
IterationKind iteration_kind) {
Label throw_bad_receiver(this, Label::kDeferred);
- Label throw_typeerror(this, Label::kDeferred);
GotoIf(TaggedIsSmi(receiver), &throw_bad_receiver);
@@ -1063,22 +1479,11 @@ void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeIterationMethod(
Return(CreateArrayIterator(receiver, map, instance_type, context,
iteration_kind));
- VARIABLE(var_message, MachineRepresentation::kTagged);
BIND(&throw_bad_receiver);
- var_message.Bind(SmiConstant(MessageTemplate::kNotTypedArray));
- Goto(&throw_typeerror);
+ ThrowTypeError(context, MessageTemplate::kNotTypedArray, method_name);
BIND(&if_receiverisneutered);
- var_message.Bind(SmiConstant(MessageTemplate::kDetachedOperation));
- Goto(&throw_typeerror);
-
- BIND(&throw_typeerror);
- {
- Node* method_arg = StringConstant(method_name);
- Node* result = CallRuntime(Runtime::kThrowTypeError, context,
- var_message.value(), method_arg);
- Return(result);
- }
+ ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
}
// ES6 #sec-%typedarray%.prototype.values
@@ -1107,6 +1512,427 @@ TF_BUILTIN(TypedArrayPrototypeKeys, TypedArrayBuiltinsAssembler) {
context, receiver, "%TypedArray%.prototype.keys()", IterationKind::kKeys);
}
+// ES6 #sec-%typedarray%.of
+TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+
+ // 1. Let len be the actual number of arguments passed to this function.
+ TNode<IntPtrT> length = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount)));
+ // 2. Let items be the List of arguments passed to this function.
+ CodeStubArguments args(this, length, nullptr, INTPTR_PARAMETERS,
+ CodeStubArguments::ReceiverMode::kHasReceiver);
+
+ Label if_not_constructor(this, Label::kDeferred),
+ if_neutered(this, Label::kDeferred);
+
+ // 3. Let C be the this value.
+ // 4. If IsConstructor(C) is false, throw a TypeError exception.
+ TNode<Object> receiver = args.GetReceiver();
+ GotoIf(TaggedIsSmi(receiver), &if_not_constructor);
+ GotoIfNot(IsConstructor(receiver), &if_not_constructor);
+
+ // 5. Let newObj be ? TypedArrayCreate(C, len).
+ TNode<JSTypedArray> new_typed_array =
+ CreateByLength(context, receiver, SmiTag(length), "%TypedArray%.of");
+
+ TNode<Word32T> elements_kind = LoadElementsKind(new_typed_array);
+
+ // 6. Let k be 0.
+ // 7. Repeat, while k < len
+ // a. Let kValue be items[k].
+ // b. Let Pk be ! ToString(k).
+ // c. Perform ? Set(newObj, Pk, kValue, true).
+ // d. Increase k by 1.
+ DispatchTypedArrayByElementsKind(
+ elements_kind,
+ [&](ElementsKind kind, int size, int typed_array_fun_index) {
+ TNode<FixedTypedArrayBase> elements =
+ CAST(LoadElements(new_typed_array));
+ BuildFastLoop(
+ IntPtrConstant(0), length,
+ [&](Node* index) {
+ TNode<Object> item = args.AtIndex(index, INTPTR_PARAMETERS);
+ TNode<IntPtrT> intptr_index = UncheckedCast<IntPtrT>(index);
+ if (kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS) {
+ EmitBigTypedArrayElementStore(new_typed_array, elements,
+ intptr_index, item, context,
+ &if_neutered);
+ } else {
+ Node* value =
+ PrepareValueForWriteToTypedArray(item, kind, context);
+
+ // ToNumber may execute JavaScript code, which could neuter
+ // the array's buffer.
+ Node* buffer = LoadObjectField(new_typed_array,
+ JSTypedArray::kBufferOffset);
+ GotoIf(IsDetachedBuffer(buffer), &if_neutered);
+
+ // GC may move backing store in ToNumber, thus load backing
+ // store every time in this loop.
+ TNode<RawPtrT> backing_store =
+ LoadFixedTypedArrayBackingStore(elements);
+ StoreElement(backing_store, kind, index, value,
+ INTPTR_PARAMETERS);
+ }
+ },
+ 1, ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ });
+
+ // 8. Return newObj.
+ args.PopAndReturn(new_typed_array);
+
+ BIND(&if_not_constructor);
+ ThrowTypeError(context, MessageTemplate::kNotConstructor, receiver);
+
+ BIND(&if_neutered);
+ ThrowTypeError(context, MessageTemplate::kDetachedOperation,
+ "%TypedArray%.of");
+}
+
+TF_BUILTIN(IterableToList, TypedArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
+ TNode<Object> iterator_fn = CAST(Parameter(Descriptor::kIteratorFn));
+
+ Label fast_path(this), slow_path(this), done(this);
+
+ TVARIABLE(JSArray, created_list);
+
+ // This is a fast-path for ignoring the iterator.
+ // TODO(petermarshall): Port to CSA.
+ Node* elided =
+ CallRuntime(Runtime::kIterableToListCanBeElided, context, iterable);
+ CSA_ASSERT(this, IsBoolean(elided));
+ Branch(IsTrue(elided), &fast_path, &slow_path);
+
+ BIND(&fast_path);
+ {
+ created_list = CAST(iterable);
+ Goto(&done);
+ }
+
+ BIND(&slow_path);
+ {
+ IteratorBuiltinsAssembler iterator_assembler(state());
+
+ // 1. Let iteratorRecord be ? GetIterator(items, method).
+ IteratorRecord iterator_record =
+ iterator_assembler.GetIterator(context, iterable, iterator_fn);
+
+ // 2. Let values be a new empty List.
+ GrowableFixedArray values(state());
+
+ Variable* vars[] = {values.var_array(), values.var_length(),
+ values.var_capacity()};
+ Label loop_start(this, 3, vars), loop_end(this);
+ Goto(&loop_start);
+ // 3. Let next be true.
+ // 4. Repeat, while next is not false
+ BIND(&loop_start);
+ {
+ // a. Set next to ? IteratorStep(iteratorRecord).
+ TNode<Object> next = CAST(
+ iterator_assembler.IteratorStep(context, iterator_record, &loop_end));
+ // b. If next is not false, then
+ // i. Let nextValue be ? IteratorValue(next).
+ TNode<Object> next_value =
+ CAST(iterator_assembler.IteratorValue(context, next));
+ // ii. Append nextValue to the end of the List values.
+ values.Push(next_value);
+ Goto(&loop_start);
+ }
+ BIND(&loop_end);
+
+ // 5. Return values.
+ TNode<JSArray> js_array_values = values.ToJSArray(context);
+ created_list = js_array_values;
+ Goto(&done);
+ }
+
+ BIND(&done);
+ Return(created_list.value());
+}
+
+// ES6 #sec-%typedarray%.from
+TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+
+ Label check_iterator(this), from_array_like(this), fast_path(this),
+ slow_path(this), create_typed_array(this),
+ if_not_constructor(this, Label::kDeferred),
+ if_map_fn_not_callable(this, Label::kDeferred),
+ if_iterator_fn_not_callable(this, Label::kDeferred),
+ if_neutered(this, Label::kDeferred);
+
+ CodeStubArguments args(
+ this, ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount)));
+ TNode<Object> source = args.GetOptionalArgumentValue(0);
+
+ // 5. If thisArg is present, let T be thisArg; else let T be undefined.
+ TNode<Object> this_arg = args.GetOptionalArgumentValue(2);
+
+ // 1. Let C be the this value.
+ // 2. If IsConstructor(C) is false, throw a TypeError exception.
+ TNode<Object> receiver = args.GetReceiver();
+ GotoIf(TaggedIsSmi(receiver), &if_not_constructor);
+ GotoIfNot(IsConstructor(receiver), &if_not_constructor);
+
+ // 3. If mapfn is present and mapfn is not undefined, then
+ TNode<Object> map_fn = args.GetOptionalArgumentValue(1);
+ TVARIABLE(BoolT, mapping, Int32FalseConstant());
+ GotoIf(IsUndefined(map_fn), &check_iterator);
+
+ // a. If IsCallable(mapfn) is false, throw a TypeError exception.
+ // b. Let mapping be true.
+ // 4. Else, let mapping be false.
+ GotoIf(TaggedIsSmi(map_fn), &if_map_fn_not_callable);
+ GotoIfNot(IsCallable(map_fn), &if_map_fn_not_callable);
+ mapping = Int32TrueConstant();
+ Goto(&check_iterator);
+
+ TVARIABLE(Object, final_source);
+ TVARIABLE(Smi, final_length);
+
+  // We split this builtin up differently from the way it is written in the
+  // spec. We already have efficient code in the elements accessor for copying
+  // from a JSArray into a TypedArray, so we use that whenever possible. We
+  // only avoid calling into the elements accessor when a mapping function is
+  // present, because the accessor cannot apply it; that case is the slow path
+  // here. We also combine the two loops in the specification (starting at
+  // steps 7.e and 13) because they are essentially identical, which also
+  // saves on code size.
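+  //
+  // For example, Uint8Array.from([1, 2, 3]) takes the elements-accessor fast
+  // path, while Uint8Array.from([1, 2, 3], (x) => x * 2) must take the
+  // per-element slow path below because of the mapping function.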
+
+ BIND(&check_iterator);
+ {
+ // 6. Let usingIterator be ? GetMethod(source, @@iterator).
+ TNode<Object> iterator_fn =
+ CAST(GetMethod(context, source, isolate()->factory()->iterator_symbol(),
+ &from_array_like));
+ GotoIf(TaggedIsSmi(iterator_fn), &if_iterator_fn_not_callable);
+ GotoIfNot(IsCallable(iterator_fn), &if_iterator_fn_not_callable);
+
+ // We are using the iterator.
+ Label if_length_not_smi(this, Label::kDeferred);
+ // 7. If usingIterator is not undefined, then
+ // a. Let values be ? IterableToList(source, usingIterator).
+ // b. Let len be the number of elements in values.
+ TNode<JSArray> values = CAST(
+ CallBuiltin(Builtins::kIterableToList, context, source, iterator_fn));
+
+ // This is not a spec'd limit, so it doesn't particularly matter when we
+ // throw the range error for typed array length > MaxSmi.
+ TNode<Object> raw_length = LoadJSArrayLength(values);
+ GotoIfNot(TaggedIsSmi(raw_length), &if_length_not_smi);
+
+ final_length = CAST(raw_length);
+ final_source = values;
+ Goto(&create_typed_array);
+
+ BIND(&if_length_not_smi);
+ ThrowRangeError(context, MessageTemplate::kInvalidTypedArrayLength,
+ raw_length);
+ }
+
+ BIND(&from_array_like);
+ {
+ Label if_length_not_smi(this, Label::kDeferred);
+ final_source = source;
+
+ // 10. Let len be ? ToLength(? Get(arrayLike, "length")).
+ TNode<Object> raw_length =
+ GetProperty(context, final_source.value(), LengthStringConstant());
+ final_length = ToSmiLength(raw_length, context, &if_length_not_smi);
+ Goto(&create_typed_array);
+
+ BIND(&if_length_not_smi);
+ ThrowRangeError(context, MessageTemplate::kInvalidTypedArrayLength,
+ raw_length);
+ }
+
+ TVARIABLE(JSTypedArray, target_obj);
+
+ BIND(&create_typed_array);
+ {
+ // 7c/11. Let targetObj be ? TypedArrayCreate(C, «len»).
+ target_obj = CreateByLength(context, receiver, final_length.value(),
+ "%TypedArray%.from");
+
+ Branch(mapping.value(), &slow_path, &fast_path);
+ }
+
+ BIND(&fast_path);
+ {
+ Label done(this);
+ GotoIf(SmiEqual(final_length.value(), SmiConstant(0)), &done);
+
+ CallRuntime(Runtime::kTypedArrayCopyElements, context, target_obj.value(),
+ final_source.value(), final_length.value());
+ Goto(&done);
+
+ BIND(&done);
+ args.PopAndReturn(target_obj.value());
+ }
+
+ BIND(&slow_path);
+ TNode<Word32T> elements_kind = LoadElementsKind(target_obj.value());
+
+  // 7e/13: Copy the elements
+ TNode<FixedTypedArrayBase> elements = CAST(LoadElements(target_obj.value()));
+ BuildFastLoop(
+ SmiConstant(0), final_length.value(),
+ [&](Node* index) {
+ TNode<Object> const k_value =
+ GetProperty(context, final_source.value(), index);
+
+ TNode<Object> const mapped_value =
+ CAST(CallJS(CodeFactory::Call(isolate()), context, map_fn, this_arg,
+ k_value, index));
+
+ TNode<IntPtrT> intptr_index = SmiUntag(index);
+ DispatchTypedArrayByElementsKind(
+ elements_kind,
+ [&](ElementsKind kind, int size, int typed_array_fun_index) {
+ if (kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS) {
+ EmitBigTypedArrayElementStore(target_obj.value(), elements,
+ intptr_index, mapped_value,
+ context, &if_neutered);
+ } else {
+ Node* const final_value = PrepareValueForWriteToTypedArray(
+ mapped_value, kind, context);
+
+ // ToNumber may execute JavaScript code, which could neuter
+ // the array's buffer.
+ Node* buffer = LoadObjectField(target_obj.value(),
+ JSTypedArray::kBufferOffset);
+ GotoIf(IsDetachedBuffer(buffer), &if_neutered);
+
+                  // GC may move the backing store while map_fn runs, so
+                  // reload the backing store in each iteration of this loop.
+ TNode<RawPtrT> backing_store =
+ LoadFixedTypedArrayBackingStore(elements);
+ StoreElement(backing_store, kind, index, final_value,
+ SMI_PARAMETERS);
+ }
+ });
+ },
+ 1, ParameterMode::SMI_PARAMETERS, IndexAdvanceMode::kPost);
+
+ args.PopAndReturn(target_obj.value());
+
+ BIND(&if_not_constructor);
+ ThrowTypeError(context, MessageTemplate::kNotConstructor, receiver);
+
+ BIND(&if_map_fn_not_callable);
+ ThrowTypeError(context, MessageTemplate::kCalledNonCallable, map_fn);
+
+ BIND(&if_iterator_fn_not_callable);
+ ThrowTypeError(context, MessageTemplate::kIteratorSymbolNonCallable);
+
+ BIND(&if_neutered);
+ ThrowTypeError(context, MessageTemplate::kDetachedOperation,
+ "%TypedArray%.from");
+}
+
+// ES %TypedArray%.prototype.filter
+TF_BUILTIN(TypedArrayPrototypeFilter, TypedArrayBuiltinsAssembler) {
+ const char* method_name = "%TypedArray%.prototype.filter";
+
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ CodeStubArguments args(
+ this, ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount)));
+
+ Label if_callback_not_callable(this, Label::kDeferred),
+ detached(this, Label::kDeferred);
+
+ // 1. Let O be the this value.
+ // 2. Perform ? ValidateTypedArray(O).
+ TNode<Object> receiver = args.GetReceiver();
+ TNode<JSTypedArray> source =
+ ValidateTypedArray(context, receiver, method_name);
+
+ // 3. Let len be O.[[ArrayLength]].
+ TNode<Smi> length = LoadObjectField<Smi>(source, JSTypedArray::kLengthOffset);
+
+ // 4. If IsCallable(callbackfn) is false, throw a TypeError exception.
+ TNode<Object> callbackfn = args.GetOptionalArgumentValue(0);
+ GotoIf(TaggedIsSmi(callbackfn), &if_callback_not_callable);
+ GotoIfNot(IsCallable(callbackfn), &if_callback_not_callable);
+
+ // 5. If thisArg is present, let T be thisArg; else let T be undefined.
+ TNode<Object> this_arg = args.GetOptionalArgumentValue(1);
+
+ TNode<JSArrayBuffer> source_buffer =
+ LoadObjectField<JSArrayBuffer>(source, JSArrayBufferView::kBufferOffset);
+ TNode<Word32T> elements_kind = LoadElementsKind(source);
+ GrowableFixedArray values(state());
+ VariableList vars(
+ {values.var_array(), values.var_length(), values.var_capacity()}, zone());
+
+ // 6. Let kept be a new empty List.
+ // 7. Let k be 0.
+ // 8. Let captured be 0.
+ // 9. Repeat, while k < len
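+  // For example, new Int8Array([1, 2, 3, 4]).filter((x) => x % 2 === 0)
+  // pushes 2 and 4 onto `values` and returns a new Int8Array of length 2.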
+ BuildFastLoop(
+ vars, SmiConstant(0), length,
+ [&](Node* index) {
+ GotoIf(IsDetachedBuffer(source_buffer), &detached);
+
+ TVARIABLE(Numeric, value);
+ // a. Let Pk be ! ToString(k).
+ // b. Let kValue be ? Get(O, Pk).
+ DispatchTypedArrayByElementsKind(
+ elements_kind,
+ [&](ElementsKind kind, int size, int typed_array_fun_index) {
+ TNode<IntPtrT> backing_store =
+ UncheckedCast<IntPtrT>(LoadDataPtr(source));
+ value = CAST(LoadFixedTypedArrayElementAsTagged(
+ backing_store, index, kind, ParameterMode::SMI_PARAMETERS));
+ });
+
+ // c. Let selected be ToBoolean(Call(callbackfn, T, kValue, k, O))
+ Node* selected =
+ CallJS(CodeFactory::Call(isolate()), context, callbackfn, this_arg,
+ value.value(), index, source);
+
+ Label true_continue(this), false_continue(this);
+ BranchIfToBooleanIsTrue(selected, &true_continue, &false_continue);
+
+ BIND(&true_continue);
+ // d. If selected is true, then
+ // i. Append kValue to the end of kept.
+ // ii. Increase captured by 1.
+ values.Push(value.value());
+ Goto(&false_continue);
+
+ BIND(&false_continue);
+ },
+ 1, ParameterMode::SMI_PARAMETERS, IndexAdvanceMode::kPost);
+
+ TNode<JSArray> values_array = values.ToJSArray(context);
+ TNode<Smi> captured = LoadFastJSArrayLength(values_array);
+
+ // 10. Let A be ? TypedArraySpeciesCreate(O, captured).
+ TNode<JSTypedArray> result_array =
+ SpeciesCreateByLength(context, source, captured, method_name);
+
+ // 11. Let n be 0.
+ // 12. For each element e of kept, do
+ // a. Perform ! Set(A, ! ToString(n), e, true).
+ // b. Increment n by 1.
+ CallRuntime(Runtime::kTypedArrayCopyElements, context, result_array,
+ values_array, captured);
+
+ // 13. Return A.
+ args.PopAndReturn(result_array);
+
+ BIND(&if_callback_not_callable);
+ ThrowTypeError(context, MessageTemplate::kCalledNonCallable, callbackfn);
+
+ BIND(&detached);
+ ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
+}
+
#undef V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-typedarray-gen.h b/deps/v8/src/builtins/builtins-typedarray-gen.h
new file mode 100644
index 0000000000..37f923dea6
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-typedarray-gen.h
@@ -0,0 +1,133 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_TYPEDARRAY_GEN_H_
+#define V8_BUILTINS_BUILTINS_TYPEDARRAY_GEN_H_
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit TypedArrayBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ TNode<JSTypedArray> SpeciesCreateByLength(TNode<Context> context,
+ TNode<JSTypedArray> exemplar,
+ TNode<Smi> len,
+ const char* method_name);
+
+ protected:
+ void GenerateTypedArrayPrototypeGetter(Node* context, Node* receiver,
+ const char* method_name,
+ int object_offset);
+ void GenerateTypedArrayPrototypeIterationMethod(Node* context, Node* receiver,
+ const char* method_name,
+ IterationKind iteration_kind);
+
+ void ConstructByLength(TNode<Context> context, TNode<JSTypedArray> holder,
+ TNode<Object> length, TNode<Smi> element_size);
+ void ConstructByArrayBuffer(TNode<Context> context,
+ TNode<JSTypedArray> holder,
+ TNode<JSArrayBuffer> buffer,
+ TNode<Object> byte_offset, TNode<Object> length,
+ TNode<Smi> element_size);
+ void ConstructByTypedArray(TNode<Context> context, TNode<JSTypedArray> holder,
+ TNode<JSTypedArray> typed_array,
+ TNode<Smi> element_size);
+ void ConstructByArrayLike(TNode<Context> context, TNode<JSTypedArray> holder,
+ TNode<HeapObject> array_like,
+ TNode<Object> initial_length,
+ TNode<Smi> element_size);
+ void ConstructByIterable(TNode<Context> context, TNode<JSTypedArray> holder,
+ TNode<JSReceiver> iterable,
+ TNode<Object> iterator_fn, TNode<Smi> element_size);
+
+ void SetupTypedArray(TNode<JSTypedArray> holder, TNode<Smi> length,
+ TNode<Number> byte_offset, TNode<Number> byte_length);
+ void AttachBuffer(TNode<JSTypedArray> holder, TNode<JSArrayBuffer> buffer,
+ TNode<Map> map, TNode<Smi> length,
+ TNode<Number> byte_offset);
+
+ TNode<Map> LoadMapForType(TNode<JSTypedArray> array);
+ TNode<UintPtrT> CalculateExternalPointer(TNode<UintPtrT> backing_store,
+ TNode<Number> byte_offset);
+ Node* LoadDataPtr(Node* typed_array);
+ TNode<BoolT> ByteLengthIsValid(TNode<Number> byte_length);
+
+ // Returns true if kind is either UINT8_ELEMENTS or UINT8_CLAMPED_ELEMENTS.
+ TNode<Word32T> IsUint8ElementsKind(TNode<Word32T> kind);
+
+ // Returns true if kind is either BIGINT64_ELEMENTS or BIGUINT64_ELEMENTS.
+ TNode<Word32T> IsBigInt64ElementsKind(TNode<Word32T> kind);
+
+  // Loads the elements kind of the given TypedArray instance.
+ TNode<Word32T> LoadElementsKind(TNode<JSTypedArray> typed_array);
+
+ // Returns the byte size of an element for a TypedArray elements kind.
+ TNode<IntPtrT> GetTypedArrayElementSize(TNode<Word32T> elements_kind);
+
+ TNode<Object> GetDefaultConstructor(TNode<Context> context,
+ TNode<JSTypedArray> exemplar);
+
+ TNode<Object> TypedArraySpeciesConstructor(TNode<Context> context,
+ TNode<JSTypedArray> exemplar);
+
+ TNode<JSTypedArray> SpeciesCreateByArrayBuffer(TNode<Context> context,
+ TNode<JSTypedArray> exemplar,
+ TNode<JSArrayBuffer> buffer,
+ TNode<Number> byte_offset,
+ TNode<Smi> len,
+ const char* method_name);
+
+ TNode<JSTypedArray> CreateByLength(TNode<Context> context,
+ TNode<Object> constructor, TNode<Smi> len,
+ const char* method_name);
+
+ TNode<JSArrayBuffer> GetBuffer(TNode<Context> context,
+ TNode<JSTypedArray> array);
+
+ TNode<JSTypedArray> ValidateTypedArray(TNode<Context> context,
+ TNode<Object> obj,
+ const char* method_name);
+
+ // Fast path for setting a TypedArray (source) onto another TypedArray
+ // (target) at an element offset.
+ void SetTypedArraySource(TNode<Context> context, TNode<JSTypedArray> source,
+ TNode<JSTypedArray> target, TNode<IntPtrT> offset,
+ Label* call_runtime, Label* if_source_too_large);
+
+ void SetJSArraySource(TNode<Context> context, TNode<JSArray> source,
+ TNode<JSTypedArray> target, TNode<IntPtrT> offset,
+ Label* call_runtime, Label* if_source_too_large);
+
+ void CallCMemmove(TNode<IntPtrT> dest_ptr, TNode<IntPtrT> src_ptr,
+ TNode<IntPtrT> byte_length);
+
+ void CallCCopyFastNumberJSArrayElementsToTypedArray(
+ TNode<Context> context, TNode<JSArray> source, TNode<JSTypedArray> dest,
+ TNode<IntPtrT> source_length, TNode<IntPtrT> offset);
+
+ void CallCCopyTypedArrayElementsToTypedArray(TNode<JSTypedArray> source,
+ TNode<JSTypedArray> dest,
+ TNode<IntPtrT> source_length,
+ TNode<IntPtrT> offset);
+
+ void CallCCopyTypedArrayElementsSlice(TNode<JSTypedArray> source,
+ TNode<JSTypedArray> dest,
+ TNode<IntPtrT> start,
+ TNode<IntPtrT> end);
+
+ typedef std::function<void(ElementsKind, int, int)> TypedArraySwitchCase;
+
+ void DispatchTypedArrayByElementsKind(
+ TNode<Word32T> elements_kind, const TypedArraySwitchCase& case_function);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_BUILTINS_TYPEDARRAY_GEN_H_
diff --git a/deps/v8/src/builtins/builtins-typedarray.cc b/deps/v8/src/builtins/builtins-typedarray.cc
index 18625c8d90..6fcc279c66 100644
--- a/deps/v8/src/builtins/builtins-typedarray.cc
+++ b/deps/v8/src/builtins/builtins-typedarray.cc
@@ -42,16 +42,6 @@ int64_t CapRelativeIndex(Handle<Object> num, int64_t minimum, int64_t maximum) {
: std::min<int64_t>(relative, maximum);
}
-MaybeHandle<JSTypedArray> TypedArraySpeciesCreateByLength(
- Isolate* isolate, Handle<JSTypedArray> exemplar, const char* method_name,
- int64_t length) {
- const int argc = 1;
- ScopedVector<Handle<Object>> argv(argc);
- argv[0] = isolate->factory()->NewNumberFromInt64(length);
- return JSTypedArray::SpeciesCreate(isolate, exemplar, argc, argv.start(),
- method_name);
-}
-
} // namespace
BUILTIN(TypedArrayPrototypeCopyWithin) {
@@ -124,10 +114,16 @@ BUILTIN(TypedArrayPrototypeFill) {
const char* method = "%TypedArray%.prototype.fill";
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, array, JSTypedArray::Validate(isolate, args.receiver(), method));
+ ElementsKind kind = array->GetElementsKind();
Handle<Object> obj_value = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, obj_value, Object::ToNumber(obj_value));
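+  // For BigInt element kinds (BigInt64Array, BigUint64Array) the fill value
+  // is converted with ToBigInt, e.g. new BigInt64Array(2).fill(1n); all other
+  // kinds convert it with ToNumber.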
+ if (kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, obj_value,
+ BigInt::FromObject(isolate, obj_value));
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, obj_value,
+ Object::ToNumber(obj_value));
+ }
int64_t len = array->length_value();
int64_t start = 0;
@@ -161,9 +157,9 @@ BUILTIN(TypedArrayPrototypeFill) {
DCHECK_LE(end, len);
DCHECK_LE(count, len);
- return array->GetElementsAccessor()->Fill(isolate, array, obj_value,
- static_cast<uint32_t>(start),
- static_cast<uint32_t>(end));
+ return ElementsAccessor::ForKind(kind)->Fill(isolate, array, obj_value,
+ static_cast<uint32_t>(start),
+ static_cast<uint32_t>(end));
}
BUILTIN(TypedArrayPrototypeIncludes) {
@@ -277,49 +273,5 @@ BUILTIN(TypedArrayPrototypeReverse) {
return *array;
}
-BUILTIN(TypedArrayPrototypeSlice) {
- HandleScope scope(isolate);
-
- Handle<JSTypedArray> array;
- const char* method = "%TypedArray%.prototype.slice";
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, array, JSTypedArray::Validate(isolate, args.receiver(), method));
-
- int64_t len = array->length_value();
- int64_t start = 0;
- int64_t end = len;
- {
- Handle<Object> num = args.atOrUndefined(isolate, 1);
- if (!num->IsUndefined(isolate)) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, num,
- Object::ToInteger(isolate, num));
- start = CapRelativeIndex(num, 0, len);
-
- num = args.atOrUndefined(isolate, 2);
- if (!num->IsUndefined(isolate)) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, num,
- Object::ToInteger(isolate, num));
- end = CapRelativeIndex(num, 0, len);
- }
- }
- }
-
- int64_t count = std::max<int64_t>(end - start, 0);
-
- Handle<JSTypedArray> result_array;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result_array,
- TypedArraySpeciesCreateByLength(isolate, array, method, count));
-
- // TODO(cwhan.tunz): should throw.
- if (V8_UNLIKELY(array->WasNeutered())) return *result_array;
-
- if (count == 0) return *result_array;
-
- ElementsAccessor* accessor = array->GetElementsAccessor();
- return *accessor->Slice(array, static_cast<uint32_t>(start),
- static_cast<uint32_t>(end), result_array);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index dc175e50b7..ad1763a292 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -86,8 +86,9 @@ Builtins::Name Builtins::GetBuiltinFromBailoutId(BailoutId id) {
void Builtins::TearDown() { initialized_ = false; }
void Builtins::IterateBuiltins(RootVisitor* v) {
- v->VisitRootPointers(Root::kBuiltins, &builtins_[0],
- &builtins_[0] + builtin_count);
+ for (int i = 0; i < builtin_count; i++) {
+ v->VisitRootPointer(Root::kBuiltins, name(i), &builtins_[i]);
+ }
}
const char* Builtins::Lookup(byte* pc) {
@@ -170,30 +171,11 @@ Callable Builtins::CallableFor(Isolate* isolate, Name name) {
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, CASE_OTHER,
CASE_OTHER, CASE_OTHER, IGNORE_BUILTIN)
#undef CASE_OTHER
- case kArrayFilterLoopEagerDeoptContinuation:
- case kArrayFilterLoopLazyDeoptContinuation:
- case kArrayEveryLoopEagerDeoptContinuation:
- case kArrayEveryLoopLazyDeoptContinuation:
- case kArrayFindIndexLoopAfterCallbackLazyDeoptContinuation:
- case kArrayFindIndexLoopEagerDeoptContinuation:
- case kArrayFindIndexLoopLazyDeoptContinuation:
- case kArrayFindLoopAfterCallbackLazyDeoptContinuation:
- case kArrayFindLoopEagerDeoptContinuation:
- case kArrayFindLoopLazyDeoptContinuation:
- case kArrayForEach:
- case kArrayForEachLoopEagerDeoptContinuation:
- case kArrayForEachLoopLazyDeoptContinuation:
- case kArrayMapLoopEagerDeoptContinuation:
- case kArrayMapLoopLazyDeoptContinuation:
- case kArrayReduceLoopEagerDeoptContinuation:
- case kArrayReduceLoopLazyDeoptContinuation:
- case kArrayReduceRightLoopEagerDeoptContinuation:
- case kArrayReduceRightLoopLazyDeoptContinuation:
- case kArraySomeLoopEagerDeoptContinuation:
- case kArraySomeLoopLazyDeoptContinuation:
- case kConsoleAssert:
- return Callable(code, BuiltinDescriptor(isolate));
default:
+ Builtins::Kind kind = Builtins::KindOf(name);
+ if (kind == TFJ || kind == CPP) {
+ return Callable(code, BuiltinDescriptor(isolate));
+ }
UNREACHABLE();
}
CallInterfaceDescriptor descriptor(isolate, key);
@@ -213,6 +195,22 @@ Address Builtins::CppEntryOf(int index) {
}
// static
+bool Builtins::IsBuiltin(Code* code) {
+ return Builtins::IsBuiltinId(code->builtin_index());
+}
+
+// static
+bool Builtins::IsOffHeapBuiltin(Code* code) {
+#ifdef V8_EMBEDDED_BUILTINS
+ return FLAG_stress_off_heap_code &&
+ Builtins::IsBuiltinId(code->builtin_index()) &&
+ Builtins::IsOffHeapSafe(code->builtin_index());
+#else
+ return false;
+#endif
+}
+
+// static
bool Builtins::IsLazy(int index) {
DCHECK(IsBuiltinId(index));
// There are a couple of reasons that builtins can require eager-loading,
@@ -245,12 +243,16 @@ bool Builtins::IsLazy(int index) {
case kArrayEveryLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
case kArrayFilterLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
case kArrayFilterLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
+ case kArrayReducePreLoopEagerDeoptContinuation:
case kArrayReduceLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
case kArrayReduceLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
+ case kArrayReduceRightPreLoopEagerDeoptContinuation:
case kArrayReduceRightLoopEagerDeoptContinuation:
case kArrayReduceRightLoopLazyDeoptContinuation:
case kArraySomeLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
case kArraySomeLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
+ case kAsyncGeneratorAwaitCaught: // https://crbug.com/v8/6786.
+ case kAsyncGeneratorAwaitUncaught: // https://crbug.com/v8/6786.
case kCheckOptimizationMarker:
case kCompileLazy:
case kDeserializeLazy:
@@ -261,9 +263,11 @@ bool Builtins::IsLazy(int index) {
case kInterpreterEnterBytecodeDispatch:
case kInterpreterEntryTrampoline:
case kObjectConstructor_ConstructStub: // https://crbug.com/v8/6787.
+ case kPromiseConstructorLazyDeoptContinuation: // crbug/v8/6786.
case kProxyConstructor_ConstructStub: // https://crbug.com/v8/6787.
case kNumberConstructor_ConstructStub: // https://crbug.com/v8/6787.
case kStringConstructor_ConstructStub: // https://crbug.com/v8/6787.
+ case kTypedArrayConstructor_ConstructStub: // https://crbug.com/v8/6787.
case kProxyConstructor: // https://crbug.com/v8/6787.
case kRecordWrite: // https://crbug.com/chromium/765301.
case kThrowWasmTrapDivByZero: // Required by wasm.
@@ -286,6 +290,1090 @@ bool Builtins::IsLazy(int index) {
}
// static
+bool Builtins::IsIsolateIndependent(int index) {
+ DCHECK(IsBuiltinId(index));
+ switch (index) {
+#ifdef DEBUG
+ case kAbortJS:
+ case kAllocateHeapNumber:
+ case kArrayEveryLoopEagerDeoptContinuation:
+ case kArrayFilterLoopEagerDeoptContinuation:
+ case kArrayFindIndexLoopEagerDeoptContinuation:
+ case kArrayFindIndexLoopLazyDeoptContinuation:
+ case kArrayFindLoopEagerDeoptContinuation:
+ case kArrayFindLoopLazyDeoptContinuation:
+ case kArrayForEachLoopEagerDeoptContinuation:
+ case kArrayForEachLoopLazyDeoptContinuation:
+ case kArrayMapLoopEagerDeoptContinuation:
+ case kArrayReduceLoopEagerDeoptContinuation:
+ case kArrayReduceLoopLazyDeoptContinuation:
+ case kArrayReducePreLoopEagerDeoptContinuation:
+ case kArrayReduceRightLoopEagerDeoptContinuation:
+ case kArrayReduceRightLoopLazyDeoptContinuation:
+ case kArrayReduceRightPreLoopEagerDeoptContinuation:
+ case kArraySomeLoopEagerDeoptContinuation:
+ case kBitwiseNot:
+ case kBooleanPrototypeToString:
+ case kBooleanPrototypeValueOf:
+ case kContinueToCodeStubBuiltin:
+ case kContinueToCodeStubBuiltinWithResult:
+ case kContinueToJavaScriptBuiltin:
+ case kContinueToJavaScriptBuiltinWithResult:
+ case kDatePrototypeGetDate:
+ case kDatePrototypeGetDay:
+ case kDatePrototypeGetFullYear:
+ case kDatePrototypeGetHours:
+ case kDatePrototypeGetMilliseconds:
+ case kDatePrototypeGetMinutes:
+ case kDatePrototypeGetMonth:
+ case kDatePrototypeGetSeconds:
+ case kDatePrototypeGetTime:
+ case kDatePrototypeGetTimezoneOffset:
+ case kDatePrototypeGetUTCDate:
+ case kDatePrototypeGetUTCDay:
+ case kDatePrototypeGetUTCFullYear:
+ case kDatePrototypeGetUTCHours:
+ case kDatePrototypeGetUTCMilliseconds:
+ case kDatePrototypeGetUTCMinutes:
+ case kDatePrototypeGetUTCMonth:
+ case kDatePrototypeGetUTCSeconds:
+ case kDatePrototypeToPrimitive:
+ case kDatePrototypeValueOf:
+ case kDecrement:
+ case kDivide:
+ case kGlobalIsFinite:
+ case kGlobalIsNaN:
+ case kIncrement:
+ case kKeyedLoadIC_Slow:
+ case kKeyedLoadICTrampoline:
+ case kKeyedStoreIC_Slow:
+ case kKeyedStoreICTrampoline:
+ case kLoadField:
+ case kLoadGlobalICInsideTypeofTrampoline:
+ case kLoadGlobalIC_Slow:
+ case kLoadGlobalICTrampoline:
+ case kLoadIC_Slow:
+ case kLoadICTrampoline:
+ case kMapPrototypeEntries:
+ case kMapPrototypeGet:
+ case kMapPrototypeGetSize:
+ case kMapPrototypeHas:
+ case kMapPrototypeKeys:
+ case kMapPrototypeValues:
+ case kMathAcos:
+ case kMathAcosh:
+ case kMathAsin:
+ case kMathAsinh:
+ case kMathAtan:
+ case kMathAtan2:
+ case kMathAtanh:
+ case kMathCbrt:
+ case kMathCeil:
+ case kMathCos:
+ case kMathCosh:
+ case kMathExp:
+ case kMathExpm1:
+ case kMathFloor:
+ case kMathFround:
+ case kMathLog:
+ case kMathLog10:
+ case kMathLog1p:
+ case kMathLog2:
+ case kMathMax:
+ case kMathMin:
+ case kMathRound:
+ case kMathSign:
+ case kMathSin:
+ case kMathSinh:
+ case kMathSqrt:
+ case kMathTan:
+ case kMathTanh:
+ case kMathTrunc:
+ case kModulus:
+ case kMultiply:
+ case kNonPrimitiveToPrimitive_Default:
+ case kNonPrimitiveToPrimitive_Number:
+ case kNonPrimitiveToPrimitive_String:
+ case kNumberIsFinite:
+ case kNumberIsInteger:
+ case kNumberIsNaN:
+ case kNumberIsSafeInteger:
+ case kNumberPrototypeValueOf:
+ case kObjectPrototypeToLocaleString:
+ case kObjectPrototypeValueOf:
+ case kPromiseCapabilityDefaultReject:
+ case kPromiseCapabilityDefaultResolve:
+ case kPromiseConstructorLazyDeoptContinuation:
+ case kPromiseInternalReject:
+ case kPromiseInternalResolve:
+ case kPromiseResolveTrampoline:
+ case kPromiseThrowerFinally:
+ case kPromiseValueThunkFinally:
+ case kProxyConstructor:
+ case kReflectHas:
+ case kRegExpPrototypeDotAllGetter:
+ case kRegExpPrototypeGlobalGetter:
+ case kRegExpPrototypeIgnoreCaseGetter:
+ case kRegExpPrototypeMultilineGetter:
+ case kRegExpPrototypeSourceGetter:
+ case kRegExpPrototypeStickyGetter:
+ case kRegExpPrototypeUnicodeGetter:
+ case kReturnReceiver:
+ case kSetPrototypeEntries:
+ case kSetPrototypeGetSize:
+ case kSetPrototypeValues:
+ case kStoreGlobalIC_Slow:
+ case kStoreGlobalICTrampoline:
+ case kStoreICTrampoline:
+ case kStringPrototypeBig:
+ case kStringPrototypeBlink:
+ case kStringPrototypeBold:
+ case kStringPrototypeConcat:
+ case kStringPrototypeFixed:
+ case kStringPrototypeItalics:
+ case kStringPrototypeIterator:
+ case kStringPrototypeSmall:
+ case kStringPrototypeStrike:
+ case kStringPrototypeSub:
+ case kStringPrototypeSup:
+#ifdef V8_INTL_SUPPORT
+ case kStringPrototypeToLowerCaseIntl:
+#endif
+ case kSubtract:
+ case kSymbolPrototypeToPrimitive:
+ case kSymbolPrototypeToString:
+ case kSymbolPrototypeValueOf:
+ case kThrowWasmTrapDivByZero:
+ case kThrowWasmTrapDivUnrepresentable:
+ case kThrowWasmTrapFloatUnrepresentable:
+ case kThrowWasmTrapFuncInvalid:
+ case kThrowWasmTrapFuncSigMismatch:
+ case kThrowWasmTrapMemOutOfBounds:
+ case kThrowWasmTrapRemByZero:
+ case kThrowWasmTrapUnreachable:
+ case kToInteger:
+ case kTypedArrayConstructor:
+ case kWasmStackGuard:
+ case kWeakMapGet:
+ case kWeakMapHas:
+ case kWeakMapPrototypeDelete:
+ case kWeakMapPrototypeSet:
+ case kWeakSetHas:
+ case kWeakSetPrototypeAdd:
+ case kWeakSetPrototypeDelete:
+#else
+ case kAbortJS:
+ case kAdd:
+ case kAllocateHeapNumber:
+ case kArrayEvery:
+ case kArrayEveryLoopContinuation:
+ case kArrayEveryLoopEagerDeoptContinuation:
+ case kArrayEveryLoopLazyDeoptContinuation:
+ case kArrayFilterLoopEagerDeoptContinuation:
+ case kArrayFilterLoopLazyDeoptContinuation:
+ case kArrayFindIndexLoopAfterCallbackLazyDeoptContinuation:
+ case kArrayFindIndexLoopContinuation:
+ case kArrayFindIndexLoopEagerDeoptContinuation:
+ case kArrayFindIndexLoopLazyDeoptContinuation:
+ case kArrayFindLoopAfterCallbackLazyDeoptContinuation:
+ case kArrayFindLoopContinuation:
+ case kArrayFindLoopEagerDeoptContinuation:
+ case kArrayFindLoopLazyDeoptContinuation:
+ case kArrayForEach:
+ case kArrayForEachLoopContinuation:
+ case kArrayForEachLoopEagerDeoptContinuation:
+ case kArrayForEachLoopLazyDeoptContinuation:
+ case kArrayFrom:
+ case kArrayIncludes:
+ case kArrayIndexOf:
+ case kArrayIsArray:
+ case kArrayMapLoopContinuation:
+ case kArrayMapLoopEagerDeoptContinuation:
+ case kArrayMapLoopLazyDeoptContinuation:
+ case kArrayOf:
+ case kArrayPrototypeEntries:
+ case kArrayPrototypeFind:
+ case kArrayPrototypeFindIndex:
+ case kArrayPrototypeKeys:
+ case kArrayPrototypeSlice:
+ case kArrayPrototypeValues:
+ case kArrayReduce:
+ case kArrayReduceLoopContinuation:
+ case kArrayReduceLoopEagerDeoptContinuation:
+ case kArrayReduceLoopLazyDeoptContinuation:
+ case kArrayReducePreLoopEagerDeoptContinuation:
+ case kArrayReduceRight:
+ case kArrayReduceRightLoopContinuation:
+ case kArrayReduceRightLoopEagerDeoptContinuation:
+ case kArrayReduceRightLoopLazyDeoptContinuation:
+ case kArrayReduceRightPreLoopEagerDeoptContinuation:
+ case kArraySome:
+ case kArraySomeLoopContinuation:
+ case kArraySomeLoopEagerDeoptContinuation:
+ case kArraySomeLoopLazyDeoptContinuation:
+ case kAsyncFromSyncIteratorPrototypeNext:
+ case kAsyncFromSyncIteratorPrototypeReturn:
+ case kAsyncFromSyncIteratorPrototypeThrow:
+ case kAsyncFunctionAwaitFulfill:
+ case kAsyncFunctionAwaitReject:
+ case kAsyncFunctionPromiseCreate:
+ case kAsyncFunctionPromiseRelease:
+ case kAsyncGeneratorAwaitFulfill:
+ case kAsyncGeneratorAwaitReject:
+ case kAsyncGeneratorResumeNext:
+ case kAsyncGeneratorReturnClosedFulfill:
+ case kAsyncGeneratorReturnClosedReject:
+ case kAsyncGeneratorReturnFulfill:
+ case kAsyncGeneratorYieldFulfill:
+ case kAsyncIteratorValueUnwrap:
+ case kBitwiseNot:
+ case kBooleanPrototypeToString:
+ case kBooleanPrototypeValueOf:
+ case kCallProxy:
+ case kConstructFunction:
+ case kConstructProxy:
+ case kContinueToCodeStubBuiltin:
+ case kContinueToCodeStubBuiltinWithResult:
+ case kContinueToJavaScriptBuiltin:
+ case kContinueToJavaScriptBuiltinWithResult:
+ case kCreateGeneratorObject:
+ case kCreateIterResultObject:
+ case kCreateRegExpLiteral:
+ case kDatePrototypeGetDate:
+ case kDatePrototypeGetDay:
+ case kDatePrototypeGetFullYear:
+ case kDatePrototypeGetHours:
+ case kDatePrototypeGetMilliseconds:
+ case kDatePrototypeGetMinutes:
+ case kDatePrototypeGetMonth:
+ case kDatePrototypeGetSeconds:
+ case kDatePrototypeGetTime:
+ case kDatePrototypeGetTimezoneOffset:
+ case kDatePrototypeGetUTCDate:
+ case kDatePrototypeGetUTCDay:
+ case kDatePrototypeGetUTCFullYear:
+ case kDatePrototypeGetUTCHours:
+ case kDatePrototypeGetUTCMilliseconds:
+ case kDatePrototypeGetUTCMinutes:
+ case kDatePrototypeGetUTCMonth:
+ case kDatePrototypeGetUTCSeconds:
+ case kDatePrototypeToPrimitive:
+ case kDatePrototypeValueOf:
+ case kDecrement:
+ case kDeleteProperty:
+ case kDivide:
+ case kEqual:
+ case kFastConsoleAssert:
+ case kFastNewClosure:
+ case kFastNewFunctionContextEval:
+ case kFastNewFunctionContextFunction:
+ case kFastNewObject:
+ case kFindOrderedHashMapEntry:
+ case kForInEnumerate:
+ case kForInFilter:
+ case kFunctionPrototypeHasInstance:
+ case kGeneratorPrototypeNext:
+ case kGeneratorPrototypeReturn:
+ case kGeneratorPrototypeThrow:
+ case kGetSuperConstructor:
+ case kGlobalIsFinite:
+ case kGlobalIsNaN:
+ case kGreaterThan:
+ case kGreaterThanOrEqual:
+ case kHasProperty:
+ case kIncrement:
+ case kInstanceOf:
+ case kKeyedLoadIC_Megamorphic:
+ case kKeyedLoadIC_PolymorphicName:
+ case kKeyedLoadIC_Slow:
+ case kKeyedLoadICTrampoline:
+ case kKeyedStoreIC_Slow:
+ case kKeyedStoreICTrampoline:
+ case kLessThan:
+ case kLessThanOrEqual:
+ case kLoadField:
+ case kLoadGlobalIC:
+ case kLoadGlobalICInsideTypeof:
+ case kLoadGlobalICInsideTypeofTrampoline:
+ case kLoadGlobalIC_Slow:
+ case kLoadGlobalICTrampoline:
+ case kLoadIC:
+ case kLoadIC_FunctionPrototype:
+ case kLoadIC_Noninlined:
+ case kLoadIC_Slow:
+ case kLoadIC_StringLength:
+ case kLoadIC_StringWrapperLength:
+ case kLoadICTrampoline:
+ case kLoadIC_Uninitialized:
+ case kMapPrototypeEntries:
+ case kMapPrototypeForEach:
+ case kMapPrototypeGet:
+ case kMapPrototypeGetSize:
+ case kMapPrototypeHas:
+ case kMapPrototypeKeys:
+ case kMapPrototypeValues:
+ case kMathAcos:
+ case kMathAcosh:
+ case kMathAsin:
+ case kMathAsinh:
+ case kMathAtan:
+ case kMathAtan2:
+ case kMathAtanh:
+ case kMathCbrt:
+ case kMathCeil:
+ case kMathCos:
+ case kMathCosh:
+ case kMathExp:
+ case kMathExpm1:
+ case kMathFloor:
+ case kMathFround:
+ case kMathLog:
+ case kMathLog10:
+ case kMathLog1p:
+ case kMathLog2:
+ case kMathMax:
+ case kMathMin:
+ case kMathRound:
+ case kMathSign:
+ case kMathSin:
+ case kMathSinh:
+ case kMathSqrt:
+ case kMathTan:
+ case kMathTanh:
+ case kMathTrunc:
+ case kModulus:
+ case kMultiply:
+ case kNegate:
+ case kNewArgumentsElements:
+ case kNonNumberToNumber:
+ case kNonNumberToNumeric:
+ case kNonPrimitiveToPrimitive_Default:
+ case kNonPrimitiveToPrimitive_Number:
+ case kNonPrimitiveToPrimitive_String:
+ case kNumberConstructor:
+ case kNumberIsFinite:
+ case kNumberIsInteger:
+ case kNumberIsNaN:
+ case kNumberIsSafeInteger:
+ case kNumberParseFloat:
+ case kNumberPrototypeValueOf:
+ case kNumberToString:
+ case kObjectConstructor:
+ case kObjectConstructor_ConstructStub:
+ case kObjectCreate:
+ case kObjectIs:
+ case kObjectKeys:
+ case kObjectPrototypeHasOwnProperty:
+ case kObjectPrototypeIsPrototypeOf:
+ case kObjectPrototypeToLocaleString:
+ case kObjectPrototypeToString:
+ case kObjectPrototypeValueOf:
+ case kOrderedHashTableHealIndex:
+ case kOrdinaryHasInstance:
+ case kOrdinaryToPrimitive_Number:
+ case kOrdinaryToPrimitive_String:
+ case kPromiseAll:
+ case kPromiseCapabilityDefaultReject:
+ case kPromiseCapabilityDefaultResolve:
+ case kPromiseCatchFinally:
+ case kPromiseConstructor:
+ case kPromiseConstructorLazyDeoptContinuation:
+ case kPromiseFulfillReactionJob:
+ case kPromiseInternalConstructor:
+ case kPromiseInternalReject:
+ case kPromiseInternalResolve:
+ case kPromisePrototypeCatch:
+ case kPromisePrototypeFinally:
+ case kPromiseRace:
+ case kPromiseReject:
+ case kPromiseRejectReactionJob:
+ case kPromiseResolve:
+ case kPromiseResolveThenableJob:
+ case kPromiseResolveTrampoline:
+ case kPromiseThenFinally:
+ case kPromiseThrowerFinally:
+ case kPromiseValueThunkFinally:
+ case kProxyConstructor:
+ case kProxyGetProperty:
+ case kProxyHasProperty:
+ case kProxySetProperty:
+ case kRecordWrite:
+ case kReflectHas:
+ case kRegExpConstructor:
+ case kRegExpPrototypeCompile:
+ case kRegExpPrototypeDotAllGetter:
+ case kRegExpPrototypeFlagsGetter:
+ case kRegExpPrototypeGlobalGetter:
+ case kRegExpPrototypeIgnoreCaseGetter:
+ case kRegExpPrototypeMultilineGetter:
+ case kRegExpPrototypeReplace:
+ case kRegExpPrototypeSearch:
+ case kRegExpPrototypeSourceGetter:
+ case kRegExpPrototypeSplit:
+ case kRegExpPrototypeStickyGetter:
+ case kRegExpPrototypeUnicodeGetter:
+ case kResolvePromise:
+ case kReturnReceiver:
+ case kRunMicrotasks:
+ case kSameValue:
+ case kSetPrototypeEntries:
+ case kSetPrototypeForEach:
+ case kSetPrototypeGetSize:
+ case kSetPrototypeHas:
+ case kSetPrototypeValues:
+ case kStoreGlobalIC_Slow:
+ case kStoreGlobalICTrampoline:
+ case kStoreICTrampoline:
+ case kStrictEqual:
+ case kStringCodePointAtUTF16:
+ case kStringCodePointAtUTF32:
+ case kStringConstructor:
+ case kStringEqual:
+ case kStringGreaterThan:
+ case kStringGreaterThanOrEqual:
+ case kStringIndexOf:
+ case kStringLessThan:
+ case kStringLessThanOrEqual:
+ case kStringPrototypeAnchor:
+ case kStringPrototypeBig:
+ case kStringPrototypeBlink:
+ case kStringPrototypeBold:
+ case kStringPrototypeCharCodeAt:
+ case kStringPrototypeCodePointAt:
+ case kStringPrototypeConcat:
+ case kStringPrototypeFixed:
+ case kStringPrototypeFontcolor:
+ case kStringPrototypeFontsize:
+ case kStringPrototypeIncludes:
+ case kStringPrototypeIndexOf:
+ case kStringPrototypeItalics:
+ case kStringPrototypeIterator:
+ case kStringPrototypeLink:
+ case kStringPrototypeMatch:
+ case kStringPrototypePadEnd:
+ case kStringPrototypePadStart:
+ case kStringPrototypeRepeat:
+ case kStringPrototypeReplace:
+ case kStringPrototypeSearch:
+ case kStringPrototypeSmall:
+ case kStringPrototypeStrike:
+ case kStringPrototypeSub:
+ case kStringPrototypeSup:
+#ifdef V8_INTL_SUPPORT
+ case kStringPrototypeToLowerCaseIntl:
+ case kStringToLowerCaseIntl:
+#endif
+ case kStringPrototypeToString:
+ case kStringPrototypeValueOf:
+ case kStringRepeat:
+ case kStringToNumber:
+ case kSubtract:
+ case kSymbolPrototypeToPrimitive:
+ case kSymbolPrototypeToString:
+ case kSymbolPrototypeValueOf:
+ case kThrowWasmTrapDivByZero:
+ case kThrowWasmTrapDivUnrepresentable:
+ case kThrowWasmTrapFloatUnrepresentable:
+ case kThrowWasmTrapFuncInvalid:
+ case kThrowWasmTrapFuncSigMismatch:
+ case kThrowWasmTrapMemOutOfBounds:
+ case kThrowWasmTrapRemByZero:
+ case kThrowWasmTrapUnreachable:
+ case kToBoolean:
+ case kToBooleanLazyDeoptContinuation:
+ case kToInteger:
+ case kToInteger_TruncateMinusZero:
+ case kToName:
+ case kToNumber:
+ case kToNumeric:
+ case kToString:
+ case kTypedArrayConstructor:
+ case kTypedArrayConstructor_ConstructStub:
+ case kTypedArrayPrototypeByteLength:
+ case kTypedArrayPrototypeByteOffset:
+ case kTypedArrayPrototypeEntries:
+ case kTypedArrayPrototypeEvery:
+ case kTypedArrayPrototypeFind:
+ case kTypedArrayPrototypeFindIndex:
+ case kTypedArrayPrototypeForEach:
+ case kTypedArrayPrototypeKeys:
+ case kTypedArrayPrototypeLength:
+ case kTypedArrayPrototypeReduce:
+ case kTypedArrayPrototypeReduceRight:
+ case kTypedArrayPrototypeSet:
+ case kTypedArrayPrototypeSlice:
+ case kTypedArrayPrototypeSome:
+ case kTypedArrayPrototypeSubArray:
+ case kTypedArrayPrototypeToStringTag:
+ case kTypedArrayPrototypeValues:
+ case kTypeof:
+ case kWasmStackGuard:
+ case kWeakMapGet:
+ case kWeakMapHas:
+ case kWeakMapLookupHashIndex:
+ case kWeakMapPrototypeDelete:
+ case kWeakMapPrototypeSet:
+ case kWeakSetHas:
+ case kWeakSetPrototypeAdd:
+ case kWeakSetPrototypeDelete:
+#endif
+ return true;
+ default:
+ return false;
+ }
+ UNREACHABLE();
+}
+
+// static
+bool Builtins::IsOffHeapSafe(int index) {
+#ifndef V8_EMBEDDED_BUILTINS
+ return false;
+#else
+ DCHECK(IsBuiltinId(index));
+ if (IsTooShortForOffHeapTrampoline(index)) return false;
+ switch (index) {
+#ifdef DEBUG
+ case kAbortJS:
+ case kAllocateHeapNumber:
+ case kArrayEveryLoopEagerDeoptContinuation:
+ case kArrayFilterLoopEagerDeoptContinuation:
+ case kArrayFindIndexLoopEagerDeoptContinuation:
+ case kArrayFindIndexLoopLazyDeoptContinuation:
+ case kArrayFindLoopEagerDeoptContinuation:
+ case kArrayFindLoopLazyDeoptContinuation:
+ case kArrayForEachLoopEagerDeoptContinuation:
+ case kArrayForEachLoopLazyDeoptContinuation:
+ case kArrayMapLoopEagerDeoptContinuation:
+ case kArrayReduceLoopEagerDeoptContinuation:
+ case kArrayReduceLoopLazyDeoptContinuation:
+ case kArrayReducePreLoopEagerDeoptContinuation:
+ case kArrayReduceRightLoopEagerDeoptContinuation:
+ case kArrayReduceRightLoopLazyDeoptContinuation:
+ case kArrayReduceRightPreLoopEagerDeoptContinuation:
+ case kArraySomeLoopEagerDeoptContinuation:
+ case kBitwiseNot:
+ case kBooleanPrototypeToString:
+ case kBooleanPrototypeValueOf:
+ case kContinueToCodeStubBuiltin:
+ case kContinueToCodeStubBuiltinWithResult:
+ case kContinueToJavaScriptBuiltin:
+ case kContinueToJavaScriptBuiltinWithResult:
+ case kDatePrototypeGetDate:
+ case kDatePrototypeGetDay:
+ case kDatePrototypeGetFullYear:
+ case kDatePrototypeGetHours:
+ case kDatePrototypeGetMilliseconds:
+ case kDatePrototypeGetMinutes:
+ case kDatePrototypeGetMonth:
+ case kDatePrototypeGetSeconds:
+ case kDatePrototypeGetTime:
+ case kDatePrototypeGetTimezoneOffset:
+ case kDatePrototypeGetUTCDate:
+ case kDatePrototypeGetUTCDay:
+ case kDatePrototypeGetUTCFullYear:
+ case kDatePrototypeGetUTCHours:
+ case kDatePrototypeGetUTCMilliseconds:
+ case kDatePrototypeGetUTCMinutes:
+ case kDatePrototypeGetUTCMonth:
+ case kDatePrototypeGetUTCSeconds:
+ case kDatePrototypeToPrimitive:
+ case kDatePrototypeValueOf:
+ case kDecrement:
+ case kDivide:
+ case kGlobalIsFinite:
+ case kGlobalIsNaN:
+ case kIncrement:
+ case kKeyedLoadIC_Slow:
+ case kKeyedLoadICTrampoline:
+ case kKeyedStoreIC_Slow:
+ case kKeyedStoreICTrampoline:
+ case kLoadField:
+ case kLoadGlobalICInsideTypeofTrampoline:
+ case kLoadGlobalIC_Slow:
+ case kLoadGlobalICTrampoline:
+ case kLoadIC_Slow:
+ case kLoadICTrampoline:
+ case kMapPrototypeEntries:
+ case kMapPrototypeGet:
+ case kMapPrototypeGetSize:
+ case kMapPrototypeHas:
+ case kMapPrototypeKeys:
+ case kMapPrototypeValues:
+ case kMathAcos:
+ case kMathAcosh:
+ case kMathAsin:
+ case kMathAsinh:
+ case kMathAtan:
+ case kMathAtan2:
+ case kMathAtanh:
+ case kMathCbrt:
+ case kMathCeil:
+ case kMathCos:
+ case kMathCosh:
+ case kMathExp:
+ case kMathExpm1:
+ case kMathFloor:
+ case kMathFround:
+ case kMathLog:
+ case kMathLog10:
+ case kMathLog1p:
+ case kMathLog2:
+ case kMathMax:
+ case kMathMin:
+ case kMathRound:
+ case kMathSign:
+ case kMathSin:
+ case kMathSinh:
+ case kMathSqrt:
+ case kMathTan:
+ case kMathTanh:
+ case kMathTrunc:
+ case kModulus:
+ case kMultiply:
+ case kNonPrimitiveToPrimitive_Default:
+ case kNonPrimitiveToPrimitive_Number:
+ case kNonPrimitiveToPrimitive_String:
+ case kNumberIsFinite:
+ case kNumberIsInteger:
+ case kNumberIsNaN:
+ case kNumberIsSafeInteger:
+ case kNumberPrototypeValueOf:
+ case kObjectPrototypeToLocaleString:
+ case kObjectPrototypeValueOf:
+ case kPromiseCapabilityDefaultReject:
+ case kPromiseCapabilityDefaultResolve:
+ case kPromiseConstructorLazyDeoptContinuation:
+ case kPromiseInternalReject:
+ case kPromiseInternalResolve:
+ case kPromiseResolveTrampoline:
+ case kPromiseThrowerFinally:
+ case kPromiseValueThunkFinally:
+ case kProxyConstructor:
+ case kReflectHas:
+ case kRegExpPrototypeDotAllGetter:
+ case kRegExpPrototypeGlobalGetter:
+ case kRegExpPrototypeIgnoreCaseGetter:
+ case kRegExpPrototypeMultilineGetter:
+ case kRegExpPrototypeSourceGetter:
+ case kRegExpPrototypeStickyGetter:
+ case kRegExpPrototypeUnicodeGetter:
+ case kReturnReceiver:
+ case kSetPrototypeEntries:
+ case kSetPrototypeGetSize:
+ case kSetPrototypeValues:
+ case kStoreGlobalIC_Slow:
+ case kStoreGlobalICTrampoline:
+ case kStoreICTrampoline:
+ case kStringPrototypeBig:
+ case kStringPrototypeBlink:
+ case kStringPrototypeBold:
+ case kStringPrototypeConcat:
+ case kStringPrototypeFixed:
+ case kStringPrototypeItalics:
+ case kStringPrototypeIterator:
+ case kStringPrototypeSmall:
+ case kStringPrototypeStrike:
+ case kStringPrototypeSub:
+ case kStringPrototypeSup:
+#ifdef V8_INTL_SUPPORT
+ case kStringPrototypeToLowerCaseIntl:
+#endif
+ case kSubtract:
+ case kSymbolPrototypeToPrimitive:
+ case kSymbolPrototypeToString:
+ case kSymbolPrototypeValueOf:
+ case kThrowWasmTrapDivByZero:
+ case kThrowWasmTrapDivUnrepresentable:
+ case kThrowWasmTrapFloatUnrepresentable:
+ case kThrowWasmTrapFuncInvalid:
+ case kThrowWasmTrapFuncSigMismatch:
+ case kThrowWasmTrapMemOutOfBounds:
+ case kThrowWasmTrapRemByZero:
+ case kThrowWasmTrapUnreachable:
+ case kToInteger:
+ case kTypedArrayConstructor:
+ case kWasmStackGuard:
+ case kWeakMapGet:
+ case kWeakMapHas:
+ case kWeakMapPrototypeDelete:
+ case kWeakMapPrototypeSet:
+ case kWeakSetHas:
+ case kWeakSetPrototypeAdd:
+ case kWeakSetPrototypeDelete:
+#else
+ case kAbortJS:
+ case kAdd:
+ case kAllocateHeapNumber:
+ case kArrayEvery:
+ case kArrayEveryLoopContinuation:
+ case kArrayEveryLoopEagerDeoptContinuation:
+ case kArrayEveryLoopLazyDeoptContinuation:
+ case kArrayFilterLoopEagerDeoptContinuation:
+ case kArrayFilterLoopLazyDeoptContinuation:
+ case kArrayFindIndexLoopAfterCallbackLazyDeoptContinuation:
+ case kArrayFindIndexLoopContinuation:
+ case kArrayFindIndexLoopEagerDeoptContinuation:
+ case kArrayFindIndexLoopLazyDeoptContinuation:
+ case kArrayFindLoopAfterCallbackLazyDeoptContinuation:
+ case kArrayFindLoopContinuation:
+ case kArrayFindLoopEagerDeoptContinuation:
+ case kArrayFindLoopLazyDeoptContinuation:
+ case kArrayForEach:
+ case kArrayForEachLoopContinuation:
+ case kArrayForEachLoopEagerDeoptContinuation:
+ case kArrayForEachLoopLazyDeoptContinuation:
+ case kArrayFrom:
+ case kArrayIncludes:
+ case kArrayIndexOf:
+ case kArrayIsArray:
+ case kArrayMapLoopContinuation:
+ case kArrayMapLoopEagerDeoptContinuation:
+ case kArrayMapLoopLazyDeoptContinuation:
+ case kArrayOf:
+ case kArrayPrototypeEntries:
+ case kArrayPrototypeFind:
+ case kArrayPrototypeFindIndex:
+ case kArrayPrototypeKeys:
+ case kArrayPrototypeSlice:
+ case kArrayPrototypeValues:
+ case kArrayReduce:
+ case kArrayReduceLoopContinuation:
+ case kArrayReduceLoopEagerDeoptContinuation:
+ case kArrayReduceLoopLazyDeoptContinuation:
+ case kArrayReducePreLoopEagerDeoptContinuation:
+ case kArrayReduceRight:
+ case kArrayReduceRightLoopContinuation:
+ case kArrayReduceRightLoopEagerDeoptContinuation:
+ case kArrayReduceRightLoopLazyDeoptContinuation:
+ case kArrayReduceRightPreLoopEagerDeoptContinuation:
+ case kArraySome:
+ case kArraySomeLoopContinuation:
+ case kArraySomeLoopEagerDeoptContinuation:
+ case kArraySomeLoopLazyDeoptContinuation:
+ case kAsyncFromSyncIteratorPrototypeNext:
+ case kAsyncFromSyncIteratorPrototypeReturn:
+ case kAsyncFromSyncIteratorPrototypeThrow:
+ case kAsyncFunctionAwaitFulfill:
+ case kAsyncFunctionAwaitReject:
+ case kAsyncFunctionPromiseCreate:
+ case kAsyncFunctionPromiseRelease:
+ case kAsyncGeneratorAwaitFulfill:
+ case kAsyncGeneratorAwaitReject:
+ case kAsyncGeneratorResumeNext:
+ case kAsyncGeneratorReturnClosedFulfill:
+ case kAsyncGeneratorReturnClosedReject:
+ case kAsyncGeneratorReturnFulfill:
+ case kAsyncGeneratorYieldFulfill:
+ case kAsyncIteratorValueUnwrap:
+ case kBitwiseNot:
+ case kBooleanPrototypeToString:
+ case kBooleanPrototypeValueOf:
+ case kCallProxy:
+ case kConstructFunction:
+ case kConstructProxy:
+ case kContinueToCodeStubBuiltin:
+ case kContinueToCodeStubBuiltinWithResult:
+ case kContinueToJavaScriptBuiltin:
+ case kContinueToJavaScriptBuiltinWithResult:
+ case kCreateGeneratorObject:
+ case kCreateIterResultObject:
+ case kCreateRegExpLiteral:
+ case kDatePrototypeGetDate:
+ case kDatePrototypeGetDay:
+ case kDatePrototypeGetFullYear:
+ case kDatePrototypeGetHours:
+ case kDatePrototypeGetMilliseconds:
+ case kDatePrototypeGetMinutes:
+ case kDatePrototypeGetMonth:
+ case kDatePrototypeGetSeconds:
+ case kDatePrototypeGetTime:
+ case kDatePrototypeGetTimezoneOffset:
+ case kDatePrototypeGetUTCDate:
+ case kDatePrototypeGetUTCDay:
+ case kDatePrototypeGetUTCFullYear:
+ case kDatePrototypeGetUTCHours:
+ case kDatePrototypeGetUTCMilliseconds:
+ case kDatePrototypeGetUTCMinutes:
+ case kDatePrototypeGetUTCMonth:
+ case kDatePrototypeGetUTCSeconds:
+ case kDatePrototypeToPrimitive:
+ case kDatePrototypeValueOf:
+ case kDecrement:
+ case kDeleteProperty:
+ case kDivide:
+ case kEqual:
+ case kFastConsoleAssert:
+ case kFastNewClosure:
+ case kFastNewFunctionContextEval:
+ case kFastNewFunctionContextFunction:
+ case kFastNewObject:
+ case kFindOrderedHashMapEntry:
+ case kForInEnumerate:
+ case kForInFilter:
+ case kFunctionPrototypeHasInstance:
+ case kGeneratorPrototypeNext:
+ case kGeneratorPrototypeReturn:
+ case kGeneratorPrototypeThrow:
+ case kGetSuperConstructor:
+ case kGlobalIsFinite:
+ case kGlobalIsNaN:
+ case kGreaterThan:
+ case kGreaterThanOrEqual:
+ case kHasProperty:
+ case kIncrement:
+ case kInstanceOf:
+ case kKeyedLoadIC_Megamorphic:
+ case kKeyedLoadIC_PolymorphicName:
+ case kKeyedLoadIC_Slow:
+ case kKeyedLoadICTrampoline:
+ case kKeyedStoreIC_Slow:
+ case kKeyedStoreICTrampoline:
+ case kLessThan:
+ case kLessThanOrEqual:
+ case kLoadField:
+ case kLoadGlobalIC:
+ case kLoadGlobalICInsideTypeof:
+ case kLoadGlobalICInsideTypeofTrampoline:
+ case kLoadGlobalIC_Slow:
+ case kLoadGlobalICTrampoline:
+ case kLoadIC:
+ case kLoadIC_FunctionPrototype:
+ case kLoadIC_Noninlined:
+ case kLoadIC_Slow:
+ case kLoadICTrampoline:
+ case kLoadIC_Uninitialized:
+ case kMapPrototypeEntries:
+ case kMapPrototypeForEach:
+ case kMapPrototypeGet:
+ case kMapPrototypeGetSize:
+ case kMapPrototypeHas:
+ case kMapPrototypeKeys:
+ case kMapPrototypeValues:
+ case kMathAcos:
+ case kMathAcosh:
+ case kMathAsin:
+ case kMathAsinh:
+ case kMathAtan:
+ case kMathAtan2:
+ case kMathAtanh:
+ case kMathCbrt:
+ case kMathCeil:
+ case kMathCos:
+ case kMathCosh:
+ case kMathExp:
+ case kMathExpm1:
+ case kMathFloor:
+ case kMathFround:
+ case kMathLog:
+ case kMathLog10:
+ case kMathLog1p:
+ case kMathLog2:
+ case kMathMax:
+ case kMathMin:
+ case kMathRound:
+ case kMathSign:
+ case kMathSin:
+ case kMathSinh:
+ case kMathSqrt:
+ case kMathTan:
+ case kMathTanh:
+ case kMathTrunc:
+ case kModulus:
+ case kMultiply:
+ case kNegate:
+ case kNewArgumentsElements:
+ case kNonNumberToNumber:
+ case kNonNumberToNumeric:
+ case kNonPrimitiveToPrimitive_Default:
+ case kNonPrimitiveToPrimitive_Number:
+ case kNonPrimitiveToPrimitive_String:
+ case kNumberConstructor:
+ case kNumberIsFinite:
+ case kNumberIsInteger:
+ case kNumberIsNaN:
+ case kNumberIsSafeInteger:
+ case kNumberParseFloat:
+ case kNumberPrototypeValueOf:
+ case kNumberToString:
+ case kObjectConstructor:
+ case kObjectConstructor_ConstructStub:
+ case kObjectCreate:
+ case kObjectIs:
+ case kObjectKeys:
+ case kObjectPrototypeHasOwnProperty:
+ case kObjectPrototypeIsPrototypeOf:
+ case kObjectPrototypeToLocaleString:
+ case kObjectPrototypeToString:
+ case kObjectPrototypeValueOf:
+ case kOrderedHashTableHealIndex:
+ case kOrdinaryHasInstance:
+ case kOrdinaryToPrimitive_Number:
+ case kOrdinaryToPrimitive_String:
+ case kPromiseAll:
+ case kPromiseCapabilityDefaultReject:
+ case kPromiseCapabilityDefaultResolve:
+ case kPromiseCatchFinally:
+ case kPromiseConstructor:
+ case kPromiseConstructorLazyDeoptContinuation:
+ case kPromiseFulfillReactionJob:
+ case kPromiseInternalConstructor:
+ case kPromiseInternalReject:
+ case kPromiseInternalResolve:
+ case kPromisePrototypeCatch:
+ case kPromisePrototypeFinally:
+ case kPromiseRace:
+ case kPromiseReject:
+ case kPromiseRejectReactionJob:
+ case kPromiseResolve:
+ case kPromiseResolveThenableJob:
+ case kPromiseResolveTrampoline:
+ case kPromiseThenFinally:
+ case kPromiseThrowerFinally:
+ case kPromiseValueThunkFinally:
+ case kProxyConstructor:
+ case kProxyGetProperty:
+ case kProxyHasProperty:
+ case kProxySetProperty:
+ case kRecordWrite:
+ case kReflectHas:
+ case kRegExpConstructor:
+ case kRegExpPrototypeCompile:
+ case kRegExpPrototypeDotAllGetter:
+ case kRegExpPrototypeFlagsGetter:
+ case kRegExpPrototypeGlobalGetter:
+ case kRegExpPrototypeIgnoreCaseGetter:
+ case kRegExpPrototypeMultilineGetter:
+ case kRegExpPrototypeReplace:
+ case kRegExpPrototypeSearch:
+ case kRegExpPrototypeSourceGetter:
+ case kRegExpPrototypeSplit:
+ case kRegExpPrototypeStickyGetter:
+ case kRegExpPrototypeUnicodeGetter:
+ case kResolvePromise:
+ case kReturnReceiver:
+ case kRunMicrotasks:
+ case kSameValue:
+ case kSetPrototypeEntries:
+ case kSetPrototypeForEach:
+ case kSetPrototypeGetSize:
+ case kSetPrototypeHas:
+ case kSetPrototypeValues:
+ case kStoreGlobalIC_Slow:
+ case kStoreGlobalICTrampoline:
+ case kStoreICTrampoline:
+ case kStrictEqual:
+ case kStringCodePointAtUTF16:
+ case kStringCodePointAtUTF32:
+ case kStringConstructor:
+ case kStringEqual:
+ case kStringGreaterThan:
+ case kStringGreaterThanOrEqual:
+ case kStringIndexOf:
+ case kStringLessThan:
+ case kStringLessThanOrEqual:
+ case kStringPrototypeAnchor:
+ case kStringPrototypeBig:
+ case kStringPrototypeBlink:
+ case kStringPrototypeBold:
+ case kStringPrototypeCharCodeAt:
+ case kStringPrototypeCodePointAt:
+ case kStringPrototypeConcat:
+ case kStringPrototypeFixed:
+ case kStringPrototypeFontcolor:
+ case kStringPrototypeFontsize:
+ case kStringPrototypeIncludes:
+ case kStringPrototypeIndexOf:
+ case kStringPrototypeItalics:
+ case kStringPrototypeIterator:
+ case kStringPrototypeLink:
+ case kStringPrototypeMatch:
+ case kStringPrototypePadEnd:
+ case kStringPrototypePadStart:
+ case kStringPrototypeRepeat:
+ case kStringPrototypeReplace:
+ case kStringPrototypeSearch:
+ case kStringPrototypeSmall:
+ case kStringPrototypeStrike:
+ case kStringPrototypeSub:
+ case kStringPrototypeSup:
+#ifdef V8_INTL_SUPPORT
+ case kStringPrototypeToLowerCaseIntl:
+ case kStringToLowerCaseIntl:
+#endif
+ case kStringPrototypeToString:
+ case kStringPrototypeValueOf:
+ case kStringRepeat:
+ case kStringToNumber:
+ case kSubtract:
+ case kSymbolPrototypeToPrimitive:
+ case kSymbolPrototypeToString:
+ case kSymbolPrototypeValueOf:
+ case kThrowWasmTrapDivByZero:
+ case kThrowWasmTrapDivUnrepresentable:
+ case kThrowWasmTrapFloatUnrepresentable:
+ case kThrowWasmTrapFuncInvalid:
+ case kThrowWasmTrapFuncSigMismatch:
+ case kThrowWasmTrapMemOutOfBounds:
+ case kThrowWasmTrapRemByZero:
+ case kThrowWasmTrapUnreachable:
+ case kToBoolean:
+ case kToBooleanLazyDeoptContinuation:
+ case kToInteger:
+ case kToInteger_TruncateMinusZero:
+ case kToName:
+ case kToNumber:
+ case kToNumeric:
+ case kToString:
+ case kTypedArrayConstructor:
+ case kTypedArrayConstructor_ConstructStub:
+ case kTypedArrayPrototypeByteLength:
+ case kTypedArrayPrototypeByteOffset:
+ case kTypedArrayPrototypeEntries:
+ case kTypedArrayPrototypeEvery:
+ case kTypedArrayPrototypeFind:
+ case kTypedArrayPrototypeFindIndex:
+ case kTypedArrayPrototypeForEach:
+ case kTypedArrayPrototypeKeys:
+ case kTypedArrayPrototypeLength:
+ case kTypedArrayPrototypeReduce:
+ case kTypedArrayPrototypeReduceRight:
+ case kTypedArrayPrototypeSet:
+ case kTypedArrayPrototypeSlice:
+ case kTypedArrayPrototypeSome:
+ case kTypedArrayPrototypeSubArray:
+ case kTypedArrayPrototypeToStringTag:
+ case kTypedArrayPrototypeValues:
+ case kTypeof:
+ case kWasmStackGuard:
+ case kWeakMapGet:
+ case kWeakMapHas:
+ case kWeakMapLookupHashIndex:
+ case kWeakMapPrototypeDelete:
+ case kWeakMapPrototypeSet:
+ case kWeakSetHas:
+ case kWeakSetPrototypeAdd:
+ case kWeakSetPrototypeDelete:
+#endif // !DEBUG
+ return true;
+ default:
+ return false;
+ }
+ UNREACHABLE();
+#endif // V8_EMBEDDED_BUILTINS
+}
+
+// static
+bool Builtins::IsTooShortForOffHeapTrampoline(int index) {
+ switch (index) {
+ case kLoadIC_StringLength:
+ case kLoadIC_StringWrapperLength:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// static
Builtins::Kind Builtins::KindOf(int index) {
DCHECK(IsBuiltinId(index));
return builtin_metadata[index].kind;
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h
index d9090dc67e..bf96469d19 100644
--- a/deps/v8/src/builtins/builtins.h
+++ b/deps/v8/src/builtins/builtins.h
@@ -109,11 +109,33 @@ class Builtins {
static bool IsCpp(int index);
static bool HasCppImplementation(int index);
+ // True, iff the given code object is a builtin. Note that this does not
+ // necessarily mean that its kind is Code::BUILTIN.
+ static bool IsBuiltin(Code* code);
+
+ // True, iff the given code object is a builtin with off-heap code.
+ static bool IsOffHeapBuiltin(Code* code);
+
// Returns true iff the given builtin can be lazy-loaded from the snapshot.
// This is true in general for most builtins with the exception of a few
// special cases such as CompileLazy and DeserializeLazy.
static bool IsLazy(int index);
+ // Helper methods used for testing isolate-independent builtins.
+ // TODO(jgruber,v8:6666): Remove once all builtins have been migrated.
+ static bool IsIsolateIndependent(int index);
+
+ // This is the condition we currently use to determine whether a builtin is
+ // copied off-heap when --stress-off-heap-code is passed. Such builtins do not
+ // need to be isolate-independent, e.g. they can contain external references
+ // that point to one specific isolate. A further restriction is that there
+ // must be enough space for the trampoline.
+ static bool IsOffHeapSafe(int index);
+
+ // The off-heap trampoline is short but requires a certain minimal instruction
+ // size. This function states whether a given builtin is too short.
+ static bool IsTooShortForOffHeapTrampoline(int index);
+
bool is_initialized() const { return initialized_; }
// Used by SetupIsolateDelegate and Deserializer.
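
The predicates declared above are plain per-builtin queries. As a rough illustration (not part of the patch) of how a caller might combine them when deciding whether a builtin can be copied off-heap under --stress-off-heap-code; whether IsOffHeapSafe already folds in the trampoline-size check is not visible from this hunk, so the sketch checks both defensively:

  // Illustrative only -- hypothetical call site, not code from this patch.
  bool CanCopyOffHeap(int builtin_index) {
    // IsOffHeapSafe() permits isolate-dependent builtins; the size check
    // rejects builtins shorter than the off-heap trampoline itself.
    return Builtins::IsOffHeapSafe(builtin_index) &&
           !Builtins::IsTooShortForOffHeapTrampoline(builtin_index);
  }
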
diff --git a/deps/v8/src/builtins/constants-table-builder.cc b/deps/v8/src/builtins/constants-table-builder.cc
new file mode 100644
index 0000000000..a4117bd5a2
--- /dev/null
+++ b/deps/v8/src/builtins/constants-table-builder.cc
@@ -0,0 +1,83 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/constants-table-builder.h"
+
+#include "src/heap/heap-inl.h"
+
+namespace v8 {
+namespace internal {
+
+BuiltinsConstantsTableBuilder::BuiltinsConstantsTableBuilder(Isolate* isolate)
+ : isolate_(isolate), map_(isolate->heap()) {
+ // Ensure this is only called once per Isolate.
+ DCHECK_EQ(isolate_->heap()->empty_fixed_array(),
+ isolate_->heap()->builtins_constants_table());
+
+ // And that the initial value of the builtins constants table can be treated
+ // as a constant, which means that codegen will load it using the root
+ // register.
+ DCHECK(isolate_->heap()->RootCanBeTreatedAsConstant(
+ Heap::kEmptyFixedArrayRootIndex));
+}
+
+uint32_t BuiltinsConstantsTableBuilder::AddObject(Handle<Object> object) {
+#ifdef DEBUG
+ // Roots must not be inserted into the constants table as they are already
+ // accessible from the root list.
+ Heap::RootListIndex root_list_index;
+ DCHECK(!isolate_->heap()->IsRootHandle(object, &root_list_index));
+
+ // Not yet finalized.
+ DCHECK_EQ(isolate_->heap()->empty_fixed_array(),
+ isolate_->heap()->builtins_constants_table());
+#endif
+
+ uint32_t* maybe_key = map_.Find(object);
+ if (maybe_key == nullptr) {
+ uint32_t index = map_.size();
+ map_.Set(object, index);
+ return index;
+ } else {
+ return *maybe_key;
+ }
+}
+
+void BuiltinsConstantsTableBuilder::Finalize() {
+ HandleScope handle_scope(isolate_);
+
+ DCHECK_EQ(isolate_->heap()->empty_fixed_array(),
+ isolate_->heap()->builtins_constants_table());
+
+ DCHECK_LT(0, map_.size());
+ Handle<FixedArray> table =
+ isolate_->factory()->NewFixedArray(map_.size(), TENURED);
+
+ Builtins* builtins = isolate_->builtins();
+ ConstantsMap::IteratableScope it_scope(&map_);
+ for (auto it = it_scope.begin(); it != it_scope.end(); ++it) {
+ uint32_t index = *it.entry();
+ Object* value = it.key();
+ if (value->IsCode() && Code::cast(value)->kind() == Code::BUILTIN) {
+ // Replace placeholder code objects with the real builtin.
+ // See also: SetupIsolateDelegate::PopulateWithPlaceholders.
+ // TODO(jgruber): Deduplicate placeholders and their corresponding
+ // builtin.
+ value = builtins->builtin(Code::cast(value)->builtin_index());
+ }
+ table->set(index, value);
+ }
+
+#ifdef DEBUG
+ for (int i = 0; i < map_.size(); i++) {
+ DCHECK(table->get(i)->IsHeapObject());
+ DCHECK_NE(isolate_->heap()->undefined_value(), table->get(i));
+ }
+#endif
+
+ isolate_->heap()->SetBuiltinsConstantsTable(*table);
+}
+
+} // namespace internal
+} // namespace v8
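
For orientation, the builder's intended lifecycle is: register objects while builtins and bytecode handlers are generated, then finalize exactly once so the table lands on the heap's root list. A minimal sketch of assumed usage (the driver function and the constant passed in are illustrative, not taken from the patch):

  // Hypothetical driver for BuiltinsConstantsTableBuilder.
  void BuildConstantsTable(Isolate* isolate, Handle<Object> some_constant) {
    BuiltinsConstantsTableBuilder builder(isolate);
    // AddObject() deduplicates: adding the same object again returns the
    // same index, which codegen embeds instead of the object itself.
    uint32_t index = builder.AddObject(some_constant);
    USE(index);
    // After all affected code has been generated:
    builder.Finalize();  // stores the table via SetBuiltinsConstantsTable()
  }
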
diff --git a/deps/v8/src/builtins/constants-table-builder.h b/deps/v8/src/builtins/constants-table-builder.h
new file mode 100644
index 0000000000..d251d5849b
--- /dev/null
+++ b/deps/v8/src/builtins/constants-table-builder.h
@@ -0,0 +1,48 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_CONSTANTS_TABLE_BUILDER_H_
+#define V8_BUILTINS_CONSTANTS_TABLE_BUILDER_H_
+
+#include "src/allocation.h"
+#include "src/base/macros.h"
+#include "src/handles.h"
+#include "src/identity-map.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+class Object;
+
+// Utility class to build the builtins constants table and store it on the root
+// list. The constants table contains constants used by builtins, and is there
+// to avoid directly embedding them into code objects, which would not be
+// possible for off-heap (and thus immutable) code objects.
+class BuiltinsConstantsTableBuilder final {
+ public:
+ explicit BuiltinsConstantsTableBuilder(Isolate* isolate);
+
+ // Returns the index within the builtins constants list for the given object,
+ // possibly adding the object to the cache. Objects are deduplicated.
+ uint32_t AddObject(Handle<Object> object);
+
+ // Should be called after all affected code (e.g. builtins and bytecode
+ // handlers) has been generated.
+ void Finalize();
+
+ private:
+ Isolate* isolate_;
+
+ // Maps objects to corresponding indices within the constants list.
+ typedef IdentityMap<uint32_t, FreeStoreAllocationPolicy> ConstantsMap;
+ ConstantsMap map_;
+
+ DISALLOW_COPY_AND_ASSIGN(BuiltinsConstantsTableBuilder)
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_CONSTANTS_TABLE_BUILDER_H_
diff --git a/deps/v8/src/builtins/growable-fixed-array-gen.cc b/deps/v8/src/builtins/growable-fixed-array-gen.cc
new file mode 100644
index 0000000000..3a155e26f9
--- /dev/null
+++ b/deps/v8/src/builtins/growable-fixed-array-gen.cc
@@ -0,0 +1,100 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/growable-fixed-array-gen.h"
+
+#include "src/compiler/code-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+void GrowableFixedArray::Push(TNode<Object> const value) {
+ TNode<IntPtrT> const length = var_length_.value();
+ TNode<IntPtrT> const capacity = var_capacity_.value();
+
+ Label grow(this), store(this);
+ Branch(IntPtrEqual(capacity, length), &grow, &store);
+
+ BIND(&grow);
+ {
+ var_capacity_ = NewCapacity(capacity);
+ var_array_ = ResizeFixedArray(length, var_capacity_.value());
+
+ Goto(&store);
+ }
+
+ BIND(&store);
+ {
+ TNode<FixedArray> const array = var_array_.value();
+ StoreFixedArrayElement(array, length, value);
+
+ var_length_ = IntPtrAdd(length, IntPtrConstant(1));
+ }
+}
+
+TNode<JSArray> GrowableFixedArray::ToJSArray(TNode<Context> const context) {
+ const ElementsKind kind = PACKED_ELEMENTS;
+
+ TNode<Context> const native_context = LoadNativeContext(context);
+ TNode<Map> const array_map = LoadJSArrayElementsMap(kind, native_context);
+
+ // Shrink to fit if necessary.
+ {
+ Label next(this);
+
+ TNode<IntPtrT> const length = var_length_.value();
+ TNode<IntPtrT> const capacity = var_capacity_.value();
+
+ GotoIf(WordEqual(length, capacity), &next);
+
+ var_array_ = ResizeFixedArray(length, length);
+ var_capacity_ = length;
+ Goto(&next);
+
+ BIND(&next);
+ }
+
+ TNode<Smi> const result_length = SmiTag(length());
+ TNode<JSArray> const result =
+ CAST(AllocateUninitializedJSArrayWithoutElements(array_map, result_length,
+ nullptr));
+
+ StoreObjectField(result, JSObject::kElementsOffset, var_array_.value());
+
+ return result;
+}
+
+TNode<IntPtrT> GrowableFixedArray::NewCapacity(
+ TNode<IntPtrT> current_capacity) {
+ CSA_ASSERT(this,
+ IntPtrGreaterThanOrEqual(current_capacity, IntPtrConstant(0)));
+
+ // Growth rate is analogous to JSObject::NewElementsCapacity:
+ // new_capacity = (current_capacity + (current_capacity >> 1)) + 16.
+
+ TNode<IntPtrT> const new_capacity =
+ IntPtrAdd(IntPtrAdd(current_capacity, WordShr(current_capacity, 1)),
+ IntPtrConstant(16));
+
+ return new_capacity;
+}
+
+TNode<FixedArray> GrowableFixedArray::ResizeFixedArray(
+ TNode<IntPtrT> const element_count, TNode<IntPtrT> const new_capacity) {
+ CSA_ASSERT(this, IntPtrGreaterThanOrEqual(element_count, IntPtrConstant(0)));
+ CSA_ASSERT(this, IntPtrGreaterThanOrEqual(new_capacity, IntPtrConstant(0)));
+ CSA_ASSERT(this, IntPtrGreaterThanOrEqual(new_capacity, element_count));
+
+ TNode<FixedArray> const from_array = var_array_.value();
+
+ CodeStubAssembler::ExtractFixedArrayFlags flags;
+ flags |= CodeStubAssembler::ExtractFixedArrayFlag::kFixedArrays;
+ TNode<FixedArray> to_array = CAST(ExtractFixedArray(
+ from_array, nullptr, element_count, new_capacity, flags));
+
+ return to_array;
+}
+
+} // namespace internal
+} // namespace v8
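
The growth policy in NewCapacity mirrors JSObject::NewElementsCapacity: grow by roughly 1.5x plus a constant. The same arithmetic written as plain C++ (a scalar illustration, not part of the CSA code):

  // new_capacity = current + current/2 + 16
  int NewCapacityScalar(int current_capacity) {
    return current_capacity + (current_capacity >> 1) + 16;
  }
  // Successive capacities starting from 0: 16, 40, 76, 130, ...
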
diff --git a/deps/v8/src/builtins/growable-fixed-array-gen.h b/deps/v8/src/builtins/growable-fixed-array-gen.h
new file mode 100644
index 0000000000..f720659dee
--- /dev/null
+++ b/deps/v8/src/builtins/growable-fixed-array-gen.h
@@ -0,0 +1,56 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_GROWABLE_FIXED_ARRAY_GEN_H_
+#define V8_BUILTINS_GROWABLE_FIXED_ARRAY_GEN_H_
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+template <class T>
+using TNode = compiler::TNode<T>;
+
+// Utility class implementing a growable fixed array through CSA.
+class GrowableFixedArray : public CodeStubAssembler {
+ public:
+ explicit GrowableFixedArray(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state),
+ var_array_(this),
+ var_length_(this),
+ var_capacity_(this) {
+ var_array_ = EmptyFixedArrayConstant();
+ var_capacity_ = IntPtrConstant(0);
+ var_length_ = IntPtrConstant(0);
+ }
+
+ TNode<IntPtrT> length() const { return var_length_.value(); }
+
+ TVariable<FixedArray>* var_array() { return &var_array_; }
+ TVariable<IntPtrT>* var_length() { return &var_length_; }
+ TVariable<IntPtrT>* var_capacity() { return &var_capacity_; }
+
+ void Push(TNode<Object> const value);
+
+ TNode<JSArray> ToJSArray(TNode<Context> const context);
+
+ private:
+ TNode<IntPtrT> NewCapacity(TNode<IntPtrT> current_capacity);
+
+ // Creates a new array with {new_capacity} and copies the first
+ // {element_count} elements from the current array.
+ TNode<FixedArray> ResizeFixedArray(TNode<IntPtrT> const element_count,
+ TNode<IntPtrT> const new_capacity);
+
+ private:
+ TVariable<FixedArray> var_array_;
+ TVariable<IntPtrT> var_length_;
+ TVariable<IntPtrT> var_capacity_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_GROWABLE_FIXED_ARRAY_GEN_H_
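
Because GrowableFixedArray derives from CodeStubAssembler, a generated builtin can subclass it and drive it with Push() and ToJSArray() directly. A minimal sketch under the TF_BUILTIN/Descriptor conventions used elsewhere in this tree; the builtin name and pushed values are invented for illustration:

  // Hypothetical CSA builtin: collect two Smis and return them as a JSArray.
  TF_BUILTIN(GrowableFixedArrayExample, GrowableFixedArray) {
    TNode<Context> context = CAST(Parameter(Descriptor::kContext));
    Push(SmiConstant(1));  // Push() grows the backing FixedArray on demand.
    Push(SmiConstant(2));
    // ToJSArray() shrinks the store to fit and wraps it in a PACKED_ELEMENTS
    // JSArray allocated in the current native context.
    Return(ToJSArray(context));
  }
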
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 368e6670c1..3319dd0c51 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -101,7 +101,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ push(edi);
__ CallRuntime(function_id, 1);
- __ mov(ebx, eax);
+ __ mov(ecx, eax);
// Restore target function and new target.
__ pop(edx);
@@ -110,15 +110,9 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ SmiUntag(eax);
}
- __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
- __ jmp(ebx);
-}
-
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
- __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
- __ jmp(ebx);
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
+ __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+ __ jmp(ecx);
}
namespace {
@@ -224,7 +218,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ test(FieldOperand(ebx, SharedFunctionInfo::kCompilerHintsOffset),
- Immediate(SharedFunctionInfo::kDerivedConstructorMask));
+ Immediate(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ j(not_zero, &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
@@ -345,7 +339,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ mov(ebx, Operand(ebp, ConstructFrameConstants::kConstructorOffset));
__ mov(ebx, FieldOperand(ebx, JSFunction::kSharedFunctionInfoOffset));
__ test(FieldOperand(ebx, SharedFunctionInfo::kCompilerHintsOffset),
- Immediate(SharedFunctionInfo::kClassConstructorMask));
+ Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
@@ -590,6 +584,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(ecx);
@@ -748,10 +743,12 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
edx, eax, feedback_vector);
- __ add(optimized_code_entry, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
+ __ Move(ecx, optimized_code_entry);
+ __ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ pop(edx);
__ pop(eax);
- __ jmp(optimized_code_entry);
+ __ jmp(ecx);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
@@ -767,10 +764,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
// Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
- Register bytecode_offset, Register bytecode,
- Register scratch1) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Label* if_return) {
Register bytecode_size_table = scratch1;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
@@ -780,11 +780,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
ExternalReference::bytecode_size_table_address(masm->isolate())));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
- Label load_size, extra_wide;
+ Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
__ cmpb(bytecode, Immediate(0x1));
- __ j(above, &load_size, Label::kNear);
+ __ j(above, &process_bytecode, Label::kNear);
__ j(equal, &extra_wide, Label::kNear);
// Load the next bytecode and update table to the wide scaled table.
@@ -792,7 +792,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ movzx_b(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
__ add(bytecode_size_table,
Immediate(kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ jmp(&load_size, Label::kNear);
+ __ jmp(&process_bytecode, Label::kNear);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
@@ -801,8 +801,17 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ add(bytecode_size_table,
Immediate(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- // Load the size of the current bytecode.
- __ bind(&load_size);
+ __ bind(&process_bytecode);
+
+// Bail out to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ cmpb(bytecode, \
+ Immediate(static_cast<int>(interpreter::Bytecode::k##NAME))); \
+ __ j(equal, if_return, Label::kNear);
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // Otherwise, load the size of the current bytecode and advance the offset.
__ add(bytecode_offset, Operand(bytecode_size_table, bytecode, times_4, 0));
}
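
At the C++ level, the rewritten helper does the same thing on every architecture: consume a Wide/ExtraWide prefix by switching to the correspondingly scaled size table, bail out to the caller's label for any return bytecode, and otherwise advance the offset by the bytecode's size. A scalar model of that control flow (illustrative only; IsReturnBytecode and BytecodeSize are placeholder helpers, not V8 names):

  #include <cstdint>

  // Hypothetical stand-ins for the generated checks:
  bool IsReturnBytecode(uint8_t bytecode);        // RETURN_BYTECODE_LIST
  int BytecodeSize(uint8_t bytecode, int table);  // bytecode_size_table lookup

  int AdvanceOrReturn(const uint8_t* bytecodes, int offset, bool* is_return) {
    int table = 0;                     // 0: plain, 1: wide, 2: extra-wide
    uint8_t bytecode = bytecodes[offset];
    if (bytecode <= 1) {               // kWide == 0, kExtraWide == 1
      table = bytecode + 1;
      bytecode = bytecodes[++offset];  // skip past the prefix
    }
    *is_return = IsReturnBytecode(bytecode);
    if (*is_return) return offset;     // caller branches to its if_return label
    return offset + BytecodeSize(bytecode, table);
  }
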
@@ -828,7 +837,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the feedback vector from the closure.
__ mov(feedback_vector,
- FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
@@ -931,9 +940,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
masm->isolate())));
__ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ mov(ebx, Operand(kInterpreterDispatchTableRegister, ebx,
- times_pointer_size, 0));
- __ call(ebx);
+ __ mov(
+ kJavaScriptCallCodeStartRegister,
+ Operand(kInterpreterDispatchTableRegister, ebx, times_pointer_size, 0));
+ __ call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
@@ -946,16 +956,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
- // Check if we should return.
+ // Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ cmpb(ebx, Immediate(static_cast<int>(interpreter::Bytecode::kReturn)));
- __ j(equal, &do_return, Label::kNear);
-
- // Advance to the next bytecode and dispatch.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, ebx, edx);
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, ebx, ecx,
+ &do_return);
__ jmp(&do_dispatch);
__ bind(&do_return);
@@ -1262,9 +1269,10 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Dispatch to the target bytecode.
__ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ mov(ebx, Operand(kInterpreterDispatchTableRegister, ebx,
- times_pointer_size, 0));
- __ jmp(ebx);
+ __ mov(
+ kJavaScriptCallCodeStartRegister,
+ Operand(kInterpreterDispatchTableRegister, ebx, times_pointer_size, 0));
+ __ jmp(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1280,8 +1288,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister, times_1, 0));
// Advance to the next bytecode.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, ebx, edx);
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, ebx, ecx,
+ &if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ mov(ebx, kInterpreterBytecodeOffsetRegister);
@@ -1289,6 +1299,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ mov(Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp), ebx);
Generate_InterpreterEnterBytecode(masm);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1306,7 +1320,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// Get the feedback vector.
Register feedback_vector = ebx;
__ mov(feedback_vector,
- FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
@@ -1319,7 +1333,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, ecx);
// Otherwise, tail call the SFI code.
- GenerateTailCallToSharedCode(masm);
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
+ __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+ __ jmp(ecx);
}
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
@@ -1348,7 +1366,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Do we have a valid feedback vector?
__ mov(feedback_vector,
- FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
@@ -1531,6 +1549,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
}
// On failure, tail call back to regular js by re-calling the function
 // which has been reset to the compile lazy builtin.
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(ecx);
@@ -2093,7 +2112,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ test(FieldOperand(edx, SharedFunctionInfo::kCompilerHintsOffset),
- Immediate(SharedFunctionInfo::kClassConstructorMask));
+ Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ j(not_zero, &class_constructor);
// Enter the context of the function; ToObject has to run in the function
@@ -2566,6 +2585,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// eax : expected number of arguments
// edx : new target (passed through to callee)
// edi : function (passed through to callee)
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(ecx);
@@ -2581,6 +2601,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
 // Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(ecx);
diff --git a/deps/v8/src/builtins/mips/OWNERS b/deps/v8/src/builtins/mips/OWNERS
index 978563cab5..cf2df277c9 100644
--- a/deps/v8/src/builtins/mips/OWNERS
+++ b/deps/v8/src/builtins/mips/OWNERS
@@ -1,2 +1,3 @@
ivica.bogosavljevic@mips.com
Miran.Karic@mips.com
+sreten.kovacevic@mips.com
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 7af02bb32e..e2d4421f86 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -154,12 +154,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
- __ Jump(at, a2, Code::kHeaderSize - kHeapObjectTag);
-}
-
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
@@ -181,7 +175,9 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ SmiUntag(a0);
}
- __ Jump(at, v0, Code::kHeaderSize - kHeapObjectTag);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Addu(a2, v0, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(a2);
}
namespace {
@@ -285,7 +281,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
- __ And(t2, t2, Operand(SharedFunctionInfo::kDerivedConstructorMask));
+ __ And(t2, t2, Operand(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ Branch(&not_create_implicit_receiver, ne, t2, Operand(zero_reg));
// If not derived class constructor: Allocate the new receiver object.
@@ -406,7 +402,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ lw(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
- __ And(t2, t2, Operand(SharedFunctionInfo::kClassConstructorMask));
+ __ And(t2, t2, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
@@ -656,8 +652,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// undefined because generator functions are non-constructable.
__ Move(a3, a1);
__ Move(a1, t0);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
__ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
- __ Jump(a2, Code::kHeaderSize - kHeapObjectTag);
+ __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(a2);
}
__ bind(&prepare_step_in_if_stepping);
@@ -807,7 +805,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
- __ Jump(optimized_code_entry, Code::kHeaderSize - kHeapObjectTag);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Addu(a2, optimized_code_entry, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(a2);
// Optimized code slot contains deoptimized code, evict it and re-enter the
 // closure's code.
@@ -821,10 +821,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
// Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
- Register bytecode_offset, Register bytecode,
- Register scratch1, Register scratch2) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Register scratch2, Label* if_return) {
Register bytecode_size_table = scratch1;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
@@ -834,10 +837,10 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
- Label load_size, extra_wide;
+ Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
- __ Branch(&load_size, hi, bytecode, Operand(1));
+ __ Branch(&process_bytecode, hi, bytecode, Operand(1));
__ Branch(&extra_wide, eq, bytecode, Operand(1));
// Load the next bytecode and update table to the wide scaled table.
@@ -846,7 +849,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ lbu(bytecode, MemOperand(scratch2));
__ Addu(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ jmp(&load_size);
+ __ jmp(&process_bytecode);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
@@ -856,8 +859,16 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ Addu(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- // Load the size of the current bytecode.
- __ bind(&load_size);
+ __ bind(&process_bytecode);
+
+// Bail out to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ Branch(if_return, eq, bytecode, \
+ Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // Otherwise, load the size of the current bytecode and advance the offset.
__ Lsa(scratch2, bytecode_size_table, bytecode, 2);
__ lw(scratch2, MemOperand(scratch2));
__ Addu(bytecode_offset, bytecode_offset, scratch2);
@@ -886,7 +897,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the feedback vector from the closure.
__ lw(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
@@ -992,10 +1003,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
masm->isolate())));
__ Addu(a0, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
- __ lbu(a0, MemOperand(a0));
- __ Lsa(at, kInterpreterDispatchTableRegister, a0, kPointerSizeLog2);
- __ lw(at, MemOperand(at));
- __ Call(at);
+ __ lbu(t3, MemOperand(a0));
+ __ Lsa(at, kInterpreterDispatchTableRegister, t3, kPointerSizeLog2);
+ __ lw(kJavaScriptCallCodeStartRegister, MemOperand(at));
+ __ Call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
@@ -1007,18 +1018,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ lw(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
-
- // Check if we should return.
+ // Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ Addu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ lbu(a1, MemOperand(a1));
- __ Branch(&do_return, eq, a1,
- Operand(static_cast<int>(interpreter::Bytecode::kReturn)));
-
- // Advance to the next bytecode and dispatch.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, a1, a2, a3);
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, a1, a2, a3,
+ &do_return);
__ jmp(&do_dispatch);
__ bind(&do_return);
@@ -1219,10 +1226,10 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Dispatch to the target bytecode.
__ Addu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
- __ lbu(a1, MemOperand(a1));
- __ Lsa(a1, kInterpreterDispatchTableRegister, a1, kPointerSizeLog2);
- __ lw(a1, MemOperand(a1));
- __ Jump(a1);
+ __ lbu(t3, MemOperand(a1));
+ __ Lsa(a1, kInterpreterDispatchTableRegister, t3, kPointerSizeLog2);
+ __ lw(kJavaScriptCallCodeStartRegister, MemOperand(a1));
+ __ Jump(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1241,14 +1248,20 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ lbu(a1, MemOperand(a1));
// Advance to the next bytecode.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, a1, a2, a3);
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, a1, a2, a3,
+ &if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
__ sw(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1266,7 +1279,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// Get the feedback vector.
Register feedback_vector = a2;
__ lw(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
@@ -1280,7 +1293,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, t0, t3, t1);
// Otherwise, tail call the SFI code.
- GenerateTailCallToSharedCode(masm);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+ __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(a2);
}
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
@@ -1309,7 +1326,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Do we have a valid feedback vector?
__ lw(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
@@ -1484,8 +1501,10 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
}
// On failure, tail call back to regular js by re-calling the function
 // which has been reset to the compile lazy builtin.
- __ lw(t0, FieldMemOperand(a1, JSFunction::kCodeOffset));
- __ Jump(t0, Code::kHeaderSize - kHeapObjectTag);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+ __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(a2);
}
namespace {
@@ -1984,7 +2003,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
- __ And(at, a3, Operand(SharedFunctionInfo::kClassConstructorMask));
+ __ And(at, a3, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ Branch(&class_constructor, ne, at, Operand(zero_reg));
// Enter the context of the function; ToObject has to run in the function
@@ -2510,8 +2529,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a0 : expected number of arguments
// a1 : function (passed through to callee)
// a3 : new target (passed through to callee)
- __ lw(t0, FieldMemOperand(a1, JSFunction::kCodeOffset));
- __ Call(t0, Code::kHeaderSize - kHeapObjectTag);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+ __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
+ __ Call(a2);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2524,8 +2545,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ lw(t0, FieldMemOperand(a1, JSFunction::kCodeOffset));
- __ Jump(t0, Code::kHeaderSize - kHeapObjectTag);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+ __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(a2);
__ bind(&stack_overflow);
{
diff --git a/deps/v8/src/builtins/mips64/OWNERS b/deps/v8/src/builtins/mips64/OWNERS
index 978563cab5..4ce9d7f91d 100644
--- a/deps/v8/src/builtins/mips64/OWNERS
+++ b/deps/v8/src/builtins/mips64/OWNERS
@@ -1,2 +1,3 @@
ivica.bogosavljevic@mips.com
Miran.Karic@mips.com
+sreten.kovacevic@mips.com
\ No newline at end of file
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 266393070c..80ac1fadb1 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -154,13 +154,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
- __ Daddu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
-}
-
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
@@ -181,8 +174,9 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ SmiUntag(a0);
}
- __ Daddu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Daddu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(a2);
}
namespace {
@@ -287,7 +281,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
- __ And(t2, t2, Operand(SharedFunctionInfo::kDerivedConstructorMask));
+ __ And(t2, t2, Operand(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ Branch(&not_create_implicit_receiver, ne, t2, Operand(zero_reg));
// If not derived class constructor: Allocate the new receiver object.
@@ -408,7 +402,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ Ld(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
- __ And(t2, t2, Operand(SharedFunctionInfo::kClassConstructorMask));
+ __ And(t2, t2, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
@@ -547,6 +541,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// undefined because generator functions are non-constructable.
__ Move(a3, a1);
__ Move(a1, a4);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
__ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
__ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(a2);
@@ -806,9 +801,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
- __ Daddu(optimized_code_entry, optimized_code_entry,
+
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Daddu(a2, optimized_code_entry,
Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(optimized_code_entry);
+ __ Jump(a2);
// Optimized code slot contains deoptimized code, evict it and re-enter the
 // closure's code.
@@ -822,10 +819,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
// Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
- Register bytecode_offset, Register bytecode,
- Register scratch1, Register scratch2) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Register scratch2, Label* if_return) {
Register bytecode_size_table = scratch1;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
@@ -834,10 +834,10 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
- Label load_size, extra_wide;
+ Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
- __ Branch(&load_size, hi, bytecode, Operand(1));
+ __ Branch(&process_bytecode, hi, bytecode, Operand(1));
__ Branch(&extra_wide, eq, bytecode, Operand(1));
// Load the next bytecode and update table to the wide scaled table.
@@ -846,7 +846,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ Lbu(bytecode, MemOperand(scratch2));
__ Daddu(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ jmp(&load_size);
+ __ jmp(&process_bytecode);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
@@ -856,8 +856,16 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ Daddu(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- // Load the size of the current bytecode.
- __ bind(&load_size);
+ __ bind(&process_bytecode);
+
+// Bail out to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ Branch(if_return, eq, bytecode, \
+ Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // Otherwise, load the size of the current bytecode and advance the offset.
__ Dlsa(scratch2, bytecode_size_table, bytecode, 2);
__ Lw(scratch2, MemOperand(scratch2));
__ Daddu(bytecode_offset, bytecode_offset, scratch2);
@@ -886,7 +894,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the feedback vector from the closure.
__ Ld(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
@@ -992,10 +1000,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
masm->isolate())));
__ Daddu(a0, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
- __ Lbu(a0, MemOperand(a0));
- __ Dlsa(at, kInterpreterDispatchTableRegister, a0, kPointerSizeLog2);
- __ Ld(at, MemOperand(at));
- __ Call(at);
+ __ Lbu(a7, MemOperand(a0));
+ __ Dlsa(at, kInterpreterDispatchTableRegister, a7, kPointerSizeLog2);
+ __ Ld(kJavaScriptCallCodeStartRegister, MemOperand(at));
+ __ Call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
@@ -1008,17 +1016,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
- // Check if we should return.
+ // Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ Daddu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ Lbu(a1, MemOperand(a1));
- __ Branch(&do_return, eq, a1,
- Operand(static_cast<int>(interpreter::Bytecode::kReturn)));
-
- // Advance to the next bytecode and dispatch.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, a1, a2, a3);
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, a1, a2, a3,
+ &do_return);
__ jmp(&do_dispatch);
__ bind(&do_return);
@@ -1219,10 +1224,10 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Dispatch to the target bytecode.
__ Daddu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
- __ Lbu(a1, MemOperand(a1));
- __ Dlsa(a1, kInterpreterDispatchTableRegister, a1, kPointerSizeLog2);
- __ Ld(a1, MemOperand(a1));
- __ Jump(a1);
+ __ Lbu(a7, MemOperand(a1));
+ __ Dlsa(a1, kInterpreterDispatchTableRegister, a7, kPointerSizeLog2);
+ __ Ld(kJavaScriptCallCodeStartRegister, MemOperand(a1));
+ __ Jump(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1241,14 +1246,20 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ Lbu(a1, MemOperand(a1));
// Advance to the next bytecode.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, a1, a2, a3);
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, a1, a2, a3,
+ &if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
__ Sd(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1266,7 +1277,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// Get the feedback vector.
Register feedback_vector = a2;
__ Ld(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
@@ -1280,7 +1291,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5);
// Otherwise, tail call the SFI code.
- GenerateTailCallToSharedCode(masm);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+ __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(a2);
}
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
@@ -1309,7 +1324,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Do we have a valid feedback vector?
__ Ld(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
@@ -1486,9 +1501,10 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
}
// On failure, tail call back to regular js by re-calling the function
 // which has been reset to the compile lazy builtin.
- __ Ld(t0, FieldMemOperand(a1, JSFunction::kCodeOffset));
- __ Daddu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(t0);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+ __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(a2);
}
namespace {
@@ -1624,6 +1640,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
Register scratch = a4;
__ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
+
// 1. Load receiver into a1, argArray into a2 (if present), remove all
// arguments from the stack (including the receiver), and push thisArg (if
// present) instead.
@@ -1732,6 +1749,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
Register scratch = a4;
__ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
+
// 1. Load target into a1 (if present), argumentsList into a2 (if present),
// remove all arguments from the stack (including the receiver), and push
// thisArgument (if present) instead.
@@ -1786,6 +1804,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
Register undefined_value = a4;
Register scratch = a5;
+ __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
+
// 1. Load target into a1 (if present), argumentsList into a2 (if present),
// new.target into a3 (if present, otherwise use target), remove all
// arguments from the stack (including the receiver), and push thisArgument
@@ -2008,7 +2028,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
- __ And(at, a3, Operand(SharedFunctionInfo::kClassConstructorMask));
+ __ And(at, a3, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ Branch(&class_constructor, ne, at, Operand(zero_reg));
// Enter the context of the function; ToObject has to run in the function
@@ -2531,9 +2551,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a0 : expected number of arguments
// a1 : function (passed through to callee)
// a3: new target (passed through to callee)
- __ Ld(a4, FieldMemOperand(a1, JSFunction::kCodeOffset));
- __ Daddu(a4, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(a4);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+ __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(a2);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2546,9 +2567,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ Ld(a4, FieldMemOperand(a1, JSFunction::kCodeOffset));
- __ Daddu(a4, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(a4);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+ __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(a2);
__ bind(&stack_overflow);
{
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 34da70ff0f..7ae635b0c1 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -150,13 +150,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ LoadP(ip, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
- __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
-}
-
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
@@ -179,8 +172,9 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Pop(r3, r4, r6);
__ SmiUntag(r3);
}
- __ addi(ip, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
+ __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r5);
}
namespace {
@@ -293,7 +287,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r7, SharedFunctionInfo::kDerivedConstructorMask, r0);
+ __ TestBitMask(r7, SharedFunctionInfo::IsDerivedConstructorBit::kMask, r0);
__ bne(&not_create_implicit_receiver, cr0);
// If not derived class constructor: Allocate the new receiver object.
@@ -420,7 +414,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ LoadP(r7, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ LoadP(r7, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r7, SharedFunctionInfo::kClassConstructorMask, r0);
+ __ TestBitMask(r7, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
__ beq(&use_receiver, cr0);
} else {
@@ -563,9 +557,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// undefined because generator functions are non-constructable.
__ mr(r6, r4);
__ mr(r4, r7);
- __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeOffset));
- __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
+ __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r5);
}
__ bind(&prepare_step_in_if_stepping);
@@ -827,10 +822,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
- __ addi(optimized_code_entry, optimized_code_entry,
+ static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
+ __ addi(r5, optimized_code_entry,
Operand(Code::kHeaderSize - kHeapObjectTag));
- __ mr(ip, optimized_code_entry);
- __ Jump(optimized_code_entry);
+ __ mr(ip, r5);
+ __ Jump(r5);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
@@ -844,10 +840,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
// Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
- Register bytecode_offset, Register bytecode,
- Register scratch1) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Label* if_return) {
Register bytecode_size_table = scratch1;
Register scratch2 = bytecode;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
@@ -857,11 +856,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
- Label load_size, extra_wide;
+ Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
__ cmpi(bytecode, Operand(0x1));
- __ bgt(&load_size);
+ __ bgt(&process_bytecode);
__ beq(&extra_wide);
// Load the next bytecode and update table to the wide scaled table.
@@ -869,7 +868,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ lbzx(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ addi(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ b(&load_size);
+ __ b(&process_bytecode);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
@@ -879,7 +878,17 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
// Load the size of the current bytecode.
- __ bind(&load_size);
+ __ bind(&process_bytecode);
+
+// Bail out to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ cmpi(bytecode, \
+ Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
+ __ beq(if_return);
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // Otherwise, load the size of the current bytecode and advance the offset.
__ ShiftLeftImm(scratch2, bytecode, Operand(2));
__ lwzx(scratch2, MemOperand(bytecode_size_table, scratch2));
__ add(bytecode_offset, bytecode_offset, scratch2);
@@ -908,7 +917,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the feedback vector from the closure.
__ LoadP(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadP(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
@@ -1021,11 +1030,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
- __ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister,
+ __ lbzx(r6, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ShiftLeftImm(ip, r4, Operand(kPointerSizeLog2));
- __ LoadPX(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
- __ Call(ip);
+ __ ShiftLeftImm(r6, r6, Operand(kPointerSizeLog2));
+ __ LoadPX(kJavaScriptCallCodeStartRegister,
+ MemOperand(kInterpreterDispatchTableRegister, r6));
+ __ Call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
@@ -1039,16 +1049,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
- // Check if we should return.
+ // Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ cmpi(r4, Operand(static_cast<int>(interpreter::Bytecode::kReturn)));
- __ beq(&do_return);
-
- // Advance to the next bytecode and dispatch.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r4, r5);
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, r4, r5,
+ &do_return);
__ b(&do_dispatch);
__ bind(&do_return);
@@ -1251,11 +1258,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
- __ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister,
+ __ lbzx(ip, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ShiftLeftImm(ip, r4, Operand(kPointerSizeLog2));
- __ LoadPX(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
- __ Jump(ip);
+ __ ShiftLeftImm(ip, ip, Operand(kPointerSizeLog2));
+ __ LoadPX(kJavaScriptCallCodeStartRegister,
+ MemOperand(kInterpreterDispatchTableRegister, ip));
+ __ Jump(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1271,8 +1279,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister));
// Advance to the next bytecode.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r4, r5);
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, r4, r5,
+ &if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(r5, kInterpreterBytecodeOffsetRegister);
@@ -1280,6 +1290,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1297,7 +1311,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// Get the feedback vector.
Register feedback_vector = r5;
__ LoadP(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadP(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
@@ -1311,7 +1325,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8);
// Otherwise, tail call the SFI code.
- GenerateTailCallToSharedCode(masm);
+ static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kCodeOffset));
+ __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r5);
}
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
@@ -1340,7 +1358,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Do we have a valid feedback vector?
__ LoadP(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadP(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
@@ -1524,9 +1542,10 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
}
  // On failure, tail call back to regular JS by re-calling the function
  // which has been reset to the compile lazy builtin.
- __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeOffset));
- __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
+ __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r5);
}
namespace {
@@ -2051,7 +2070,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r6, SharedFunctionInfo::kClassConstructorMask, r0);
+ __ TestBitMask(r6, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
__ bne(&class_constructor, cr0);
// Enter the context of the function; ToObject has to run in the function
@@ -2443,8 +2462,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label invoke, dont_adapt_arguments, stack_overflow;
Label enough, too_few;
- __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeOffset));
- __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ cmp(r3, r5);
__ blt(&too_few);
__ cmpi(r5, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
@@ -2460,7 +2477,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r4: function
// r5: expected number of arguments
// r6: new target (passed through to callee)
- // ip: code entry to call
__ SmiToPtrArrayOffset(r3, r3);
__ add(r3, r3, fp);
// adjust for return address and receiver
@@ -2474,7 +2490,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r5: expected number of arguments
// r6: new target (passed through to callee)
// r7: copy end address
- // ip: code entry to call
Label copy;
__ bind(&copy);
@@ -2498,7 +2513,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r4: function
// r5: expected number of arguments
// r6: new target (passed through to callee)
- // ip: code entry to call
__ SmiToPtrArrayOffset(r3, r3);
__ add(r3, r3, fp);
@@ -2507,7 +2521,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r4: function
// r5: expected number of arguments
// r6: new target (passed through to callee)
- // ip: code entry to call
Label copy;
__ bind(&copy);
// Adjust load for return address and receiver.
@@ -2521,7 +2534,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r4: function
// r5: expected number of arguments
// r6: new target (passed through to callee)
- // ip: code entry to call
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ ShiftLeftImm(r7, r5, Operand(kPointerSizeLog2));
__ sub(r7, fp, r7);
@@ -2543,7 +2555,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r3 : expected number of arguments
// r4 : function (passed through to callee)
// r6 : new target (passed through to callee)
- __ CallJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
+ __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ CallJSEntry(r5);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2556,7 +2571,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
  // Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ JumpToJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
+ __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r5);
__ bind(&stack_overflow);
{
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 020b04b91d..9d7bc3fb80 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -150,13 +150,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ LoadP(ip, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
- __ AddP(ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
-}
-
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
@@ -179,8 +172,9 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Pop(r2, r3, r5);
__ SmiUntag(r2);
}
- __ AddP(ip, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
+ __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r4);
}
namespace {
@@ -288,7 +282,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r6,
FieldMemOperand(r6, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r6, SharedFunctionInfo::kDerivedConstructorMask, r0);
+ __ TestBitMask(r6, SharedFunctionInfo::IsDerivedConstructorBit::kMask, r0);
__ bne(&not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
@@ -414,7 +408,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ LoadP(r6, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r6,
FieldMemOperand(r6, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r6, SharedFunctionInfo::kClassConstructorMask, r0);
+ __ TestBitMask(r6, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
__ beq(&use_receiver);
} else {
__ b(&use_receiver);
@@ -558,9 +552,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// undefined because generator functions are non-constructable.
__ LoadRR(r5, r3);
__ LoadRR(r3, r6);
- __ LoadP(ip, FieldMemOperand(r3, JSFunction::kCodeOffset));
- __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
+ __ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
+ __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r4);
}
__ bind(&prepare_step_in_if_stepping);
@@ -830,9 +825,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
- __ AddP(optimized_code_entry, optimized_code_entry,
+ static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
+ __ AddP(r4, optimized_code_entry,
Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(optimized_code_entry);
+ __ Jump(r4);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
@@ -846,10 +842,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
// Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
- Register bytecode_offset, Register bytecode,
- Register scratch1) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Label* if_return) {
Register bytecode_size_table = scratch1;
Register scratch2 = bytecode;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
@@ -859,11 +858,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
- Label load_size, extra_wide;
+ Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
__ CmpP(bytecode, Operand(0x1));
- __ bgt(&load_size);
+ __ bgt(&process_bytecode);
__ beq(&extra_wide);
// Load the next bytecode and update table to the wide scaled table.
@@ -871,7 +870,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ LoadlB(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ AddP(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ b(&load_size);
+ __ b(&process_bytecode);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
@@ -881,7 +880,17 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
// Load the size of the current bytecode.
- __ bind(&load_size);
+ __ bind(&process_bytecode);
+
+// Bail out to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ CmpP(bytecode, \
+ Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
+ __ beq(if_return);
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // Otherwise, load the size of the current bytecode and advance the offset.
__ ShiftLeftP(scratch2, bytecode, Operand(2));
__ LoadlW(scratch2, MemOperand(bytecode_size_table, scratch2));
__ AddP(bytecode_offset, bytecode_offset, scratch2);
@@ -911,7 +920,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the feedback vector from the closure.
__ LoadP(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadP(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
@@ -1020,11 +1029,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
- __ LoadlB(r3, MemOperand(kInterpreterBytecodeArrayRegister,
+ __ LoadlB(r5, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ShiftLeftP(ip, r3, Operand(kPointerSizeLog2));
- __ LoadP(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
- __ Call(ip);
+ __ ShiftLeftP(r5, r5, Operand(kPointerSizeLog2));
+ __ LoadP(kJavaScriptCallCodeStartRegister,
+ MemOperand(kInterpreterDispatchTableRegister, r5));
+ __ Call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
@@ -1038,16 +1048,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
- // Check if we should return.
+ // Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ LoadlB(r3, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ CmpP(r3, Operand(static_cast<int>(interpreter::Bytecode::kReturn)));
- __ beq(&do_return);
-
- // Advance to the next bytecode and dispatch.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r3, r4);
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, r3, r4,
+ &do_return);
__ b(&do_dispatch);
__ bind(&do_return);
@@ -1248,11 +1255,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
- __ LoadlB(r3, MemOperand(kInterpreterBytecodeArrayRegister,
+ __ LoadlB(ip, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ShiftLeftP(ip, r3, Operand(kPointerSizeLog2));
- __ LoadP(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
- __ Jump(ip);
+ __ ShiftLeftP(ip, ip, Operand(kPointerSizeLog2));
+ __ LoadP(kJavaScriptCallCodeStartRegister,
+ MemOperand(kInterpreterDispatchTableRegister, ip));
+ __ Jump(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1268,8 +1276,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister));
// Advance to the next bytecode.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r3, r4);
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, r3, r4,
+ &if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(r4, kInterpreterBytecodeOffsetRegister);
@@ -1277,6 +1287,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1294,7 +1308,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// Get the feedback vector.
Register feedback_vector = r4;
__ LoadP(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadP(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
@@ -1308,7 +1322,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
// Otherwise, tail call the SFI code.
- GenerateTailCallToSharedCode(masm);
+ static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
+ __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r4, FieldMemOperand(r4, SharedFunctionInfo::kCodeOffset));
+ __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r4);
}
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
@@ -1337,7 +1355,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Do we have a valid feedback vector?
__ LoadP(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadP(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
@@ -1520,9 +1538,10 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
}
  // On failure, tail call back to regular JS by re-calling the function
  // which has been reset to the compile lazy builtin.
- __ LoadP(ip, FieldMemOperand(r3, JSFunction::kCodeOffset));
- __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
+ __ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
+ __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r4);
}
namespace {
@@ -2048,7 +2067,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r5, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r5, SharedFunctionInfo::kClassConstructorMask, r0);
+ __ TestBitMask(r5, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
__ bne(&class_constructor);
// Enter the context of the function; ToObject has to run in the function
@@ -2442,8 +2461,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label invoke, dont_adapt_arguments, stack_overflow;
Label enough, too_few;
- __ LoadP(ip, FieldMemOperand(r3, JSFunction::kCodeOffset));
- __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ CmpP(r2, r4);
__ blt(&too_few);
__ CmpP(r4, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
@@ -2459,7 +2476,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r3: function
// r4: expected number of arguments
// r5: new target (passed through to callee)
- // ip: code entry to call
__ SmiToPtrArrayOffset(r2, r2);
__ AddP(r2, fp);
// adjust for return address and receiver
@@ -2473,7 +2489,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r4: expected number of arguments
// r5: new target (passed through to callee)
// r6: copy end address
- // ip: code entry to call
Label copy;
__ bind(&copy);
@@ -2497,7 +2512,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r3: function
// r4: expected number of arguments
// r5: new target (passed through to callee)
- // ip: code entry to call
__ SmiToPtrArrayOffset(r2, r2);
__ lay(r2, MemOperand(r2, fp));
@@ -2506,7 +2520,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r3: function
// r4: expected number of arguments
// r5: new target (passed through to callee)
- // ip: code entry to call
Label copy;
__ bind(&copy);
// Adjust load for return address and receiver.
@@ -2519,7 +2532,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Fill the remaining expected arguments with undefined.
// r3: function
  // r4: expected number of arguments
- // ip: code entry to call
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2));
__ SubP(r6, fp, r6);
@@ -2541,7 +2553,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r2 : expected number of arguments
// r3 : function (passed through to callee)
// r5 : new target (passed through to callee)
- __ CallJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
+ __ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
+ __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ CallJSEntry(r4);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2554,7 +2569,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
  // Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ JumpToJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
+ __ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
+ __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r4);
__ bind(&stack_overflow);
{
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index 5a09658867..d30cd02ab5 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -186,7 +186,7 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
if (!target->is_builtin()) continue;
Code* new_target =
Code::cast(builtins->builtins_[target->builtin_index()]);
- rinfo->set_target_address(isolate, new_target->instruction_start(),
+ rinfo->set_target_address(new_target->instruction_start(),
UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else {
DCHECK(RelocInfo::IsEmbeddedObject(rinfo->rmode()));
@@ -202,7 +202,7 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
flush_icache = true;
}
if (flush_icache) {
- Assembler::FlushICache(isolate, code->instruction_start(),
+ Assembler::FlushICache(code->instruction_start(),
code->instruction_size());
}
}
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index cd35abb362..898fe9c14c 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -87,15 +87,6 @@ void Builtins::Generate_AdaptorWithBuiltinExitFrame(MacroAssembler* masm) {
AdaptorWithExitFrameType(masm, BUILTIN_EXIT);
}
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ movp(kScratchRegister,
- FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movp(kScratchRegister,
- FieldOperand(kScratchRegister, SharedFunctionInfo::kCodeOffset));
- __ leap(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
- __ jmp(kScratchRegister);
-}
-
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
@@ -115,7 +106,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Push(rdi);
__ CallRuntime(function_id, 1);
- __ movp(rbx, rax);
+ __ movp(rcx, rax);
// Restore target function and new target.
__ Pop(rdx);
@@ -123,8 +114,9 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Pop(rax);
__ SmiToInteger32(rax, rax);
}
- __ leap(rbx, FieldOperand(rbx, Code::kHeaderSize));
- __ jmp(rbx);
+ static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
+ __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
+ __ jmp(rcx);
}
namespace {
@@ -230,7 +222,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ testl(FieldOperand(rbx, SharedFunctionInfo::kCompilerHintsOffset),
- Immediate(SharedFunctionInfo::kDerivedConstructorMask));
+ Immediate(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ j(not_zero, &not_create_implicit_receiver, Label::kNear);
// If not derived class constructor: Allocate the new receiver object.
@@ -350,7 +342,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ movp(rbx, Operand(rbp, ConstructFrameConstants::kConstructorOffset));
__ movp(rbx, FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
__ testl(FieldOperand(rbx, SharedFunctionInfo::kCompilerHintsOffset),
- Immediate(SharedFunctionInfo::kClassConstructorMask));
+ Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
@@ -660,6 +652,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
+ static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ movp(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
__ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(rcx);
@@ -820,9 +813,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
- __ addp(optimized_code_entry,
- Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(optimized_code_entry);
+ static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
+ __ Move(rcx, optimized_code_entry);
+ __ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(rcx);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
@@ -836,10 +830,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
// Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
- Register bytecode_offset, Register bytecode,
- Register scratch1) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Label* if_return) {
Register bytecode_size_table = scratch1;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
@@ -848,11 +845,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
ExternalReference::bytecode_size_table_address(masm->isolate()));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
- Label load_size, extra_wide;
+ Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
__ cmpb(bytecode, Immediate(0x1));
- __ j(above, &load_size, Label::kNear);
+ __ j(above, &process_bytecode, Label::kNear);
__ j(equal, &extra_wide, Label::kNear);
// Load the next bytecode and update table to the wide scaled table.
@@ -860,7 +857,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ movzxbp(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
__ addp(bytecode_size_table,
Immediate(kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ jmp(&load_size, Label::kNear);
+ __ jmp(&process_bytecode, Label::kNear);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
@@ -869,8 +866,17 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ addp(bytecode_size_table,
Immediate(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- // Load the size of the current bytecode.
- __ bind(&load_size);
+ __ bind(&process_bytecode);
+
+// Bail out to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ cmpb(bytecode, \
+ Immediate(static_cast<int>(interpreter::Bytecode::k##NAME))); \
+ __ j(equal, if_return, Label::kNear);
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // Otherwise, load the size of the current bytecode and advance the offset.
__ addl(bytecode_offset, Operand(bytecode_size_table, bytecode, times_4, 0));
}
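
The same change lands on every port above: instead of each caller special-casing kReturn, the advance helper itself jumps to a caller-supplied if_return label for any return bytecode (via RETURN_BYTECODE_LIST). A rough C++ sketch of the emitted control flow, for orientation only; the table and predicate declarations below are illustrative stand-ins, not V8 API:

#include <cstddef>
#include <cstdint>

constexpr uint8_t kWide = 0;       // matches the STATIC_ASSERTs in the hunks
constexpr uint8_t kExtraWide = 1;
// Hypothetical stand-ins for the interpreter's size table and return set:
extern const int kBytecodeSizes[3][256];
bool IsReturnBytecode(uint8_t bytecode);

// Sketch of AdvanceBytecodeOffsetOrReturn: skip a Wide/ExtraWide prefix,
// bail out (leaving the offset untouched) on a return bytecode, otherwise
// advance by the size read from the appropriately scaled table.
size_t AdvanceOrReturn(const uint8_t* bytecodes, size_t offset,
                       bool* is_return) {
  int table = 0;
  uint8_t bytecode = bytecodes[offset];
  if (bytecode == kWide) {
    table = 1;
    bytecode = bytecodes[++offset];
  } else if (bytecode == kExtraWide) {
    table = 2;
    bytecode = bytecodes[++offset];
  }
  if (IsReturnBytecode(bytecode)) {
    *is_return = true;
    return offset;
  }
  *is_return = false;
  return offset + kBytecodeSizes[table][bytecode];
}
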
@@ -896,7 +902,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the feedback vector from the closure.
__ movp(feedback_vector,
- FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ movp(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
@@ -1000,11 +1006,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Move(
kInterpreterDispatchTableRegister,
ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
- __ movzxbp(rbx, Operand(kInterpreterBytecodeArrayRegister,
+ __ movzxbp(r11, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ movp(rbx, Operand(kInterpreterDispatchTableRegister, rbx,
- times_pointer_size, 0));
- __ call(rbx);
+ __ movp(
+ kJavaScriptCallCodeStartRegister,
+ Operand(kInterpreterDispatchTableRegister, r11, times_pointer_size, 0));
+ __ call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
@@ -1018,16 +1025,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ SmiToInteger32(kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeOffsetRegister);
- // Check if we should return.
+ // Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ movzxbp(rbx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ cmpb(rbx, Immediate(static_cast<int>(interpreter::Bytecode::kReturn)));
- __ j(equal, &do_return, Label::kNear);
-
- // Advance to the next bytecode and dispatch.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, rbx, rcx);
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, rbx, rcx,
+ &do_return);
__ jmp(&do_dispatch);
__ bind(&do_return);
@@ -1200,6 +1204,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
+ // TODO(jgruber,v8:6666): Update logic once builtin is off-heap-safe.
+ DCHECK(!Builtins::IsOffHeapSafe(Builtins::kInterpreterEntryTrampoline));
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
@@ -1234,11 +1240,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
- __ movzxbp(rbx, Operand(kInterpreterBytecodeArrayRegister,
+ __ movzxbp(r11, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ movp(rbx, Operand(kInterpreterDispatchTableRegister, rbx,
- times_pointer_size, 0));
- __ jmp(rbx);
+ __ movp(
+ kJavaScriptCallCodeStartRegister,
+ Operand(kInterpreterDispatchTableRegister, r11, times_pointer_size, 0));
+ __ jmp(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1255,14 +1262,20 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister, times_1, 0));
// Advance to the next bytecode.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, rbx, rcx);
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, rbx, rcx,
+ &if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ Integer32ToSmi(rbx, kInterpreterBytecodeOffsetRegister);
__ movp(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp), rbx);
Generate_InterpreterEnterBytecode(masm);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1280,7 +1293,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// Get the feedback vector.
Register feedback_vector = rbx;
__ movp(feedback_vector,
- FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ movp(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
@@ -1293,7 +1306,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r14, r15);
// Otherwise, tail call the SFI code.
- GenerateTailCallToSharedCode(masm);
+ static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kCodeOffset));
+ __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
+ __ jmp(rcx);
}
// TODO(jupvfranco): investigate whether there is any case where the CompileLazy
@@ -1325,7 +1342,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Do we have a valid feedback vector?
__ movp(feedback_vector,
- FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ movp(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
@@ -2015,6 +2032,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// rax : expected number of arguments
// rdx : new target (passed through to callee)
// rdi : function (passed through to callee)
+ static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ movp(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
__ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(rcx);
@@ -2030,6 +2048,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
  // Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
+ static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ movp(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
__ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(rcx);
@@ -2196,7 +2215,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ testl(FieldOperand(rdx, SharedFunctionInfo::kCompilerHintsOffset),
- Immediate(SharedFunctionInfo::kClassConstructorMask));
+ Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ j(not_zero, &class_constructor);
// ----------- S t a t e -------------
diff --git a/deps/v8/src/code-events.h b/deps/v8/src/code-events.h
index 4199ec3bbe..125d15d61b 100644
--- a/deps/v8/src/code-events.h
+++ b/deps/v8/src/code-events.h
@@ -9,15 +9,22 @@
#include "src/base/platform/mutex.h"
#include "src/globals.h"
+#include "src/vector.h"
namespace v8 {
namespace internal {
class AbstractCode;
+class InstructionStream;
class Name;
class SharedFunctionInfo;
class String;
+namespace wasm {
+class WasmCode;
+using WasmName = Vector<const char>;
+} // namespace wasm
+
#define LOG_EVENTS_AND_TAGS_LIST(V) \
V(CODE_CREATION_EVENT, "code-creation") \
V(CODE_DISABLE_OPT_EVENT, "code-disable-optimization") \
@@ -64,10 +71,15 @@ class CodeEventListener {
virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
SharedFunctionInfo* shared, Name* source,
int line, int column) = 0;
+ virtual void CodeCreateEvent(LogEventsAndTags tag, wasm::WasmCode* code,
+ wasm::WasmName name) = 0;
virtual void CallbackEvent(Name* name, Address entry_point) = 0;
virtual void GetterCallbackEvent(Name* name, Address entry_point) = 0;
virtual void SetterCallbackEvent(Name* name, Address entry_point) = 0;
virtual void RegExpCodeCreateEvent(AbstractCode* code, String* source) = 0;
+ virtual void InstructionStreamCreateEvent(LogEventsAndTags tag,
+ const InstructionStream* stream,
+ const char* description) = 0;
virtual void CodeMoveEvent(AbstractCode* from, Address to) = 0;
virtual void SharedFunctionInfoMoveEvent(Address from, Address to) = 0;
virtual void CodeMovingGCEvent() = 0;
@@ -114,6 +126,10 @@ class CodeEventDispatcher {
CODE_EVENT_DISPATCH(
CodeCreateEvent(tag, code, shared, source, line, column));
}
+ void CodeCreateEvent(LogEventsAndTags tag, wasm::WasmCode* code,
+ wasm::WasmName name) {
+ CODE_EVENT_DISPATCH(CodeCreateEvent(tag, code, name));
+ }
void CallbackEvent(Name* name, Address entry_point) {
CODE_EVENT_DISPATCH(CallbackEvent(name, entry_point));
}
@@ -126,6 +142,11 @@ class CodeEventDispatcher {
void RegExpCodeCreateEvent(AbstractCode* code, String* source) {
CODE_EVENT_DISPATCH(RegExpCodeCreateEvent(code, source));
}
+ void InstructionStreamCreateEvent(LogEventsAndTags tag,
+ const InstructionStream* stream,
+ const char* description) {
+ CODE_EVENT_DISPATCH(InstructionStreamCreateEvent(tag, stream, description));
+ }
void CodeMoveEvent(AbstractCode* from, Address to) {
CODE_EVENT_DISPATCH(CodeMoveEvent(from, to));
}
diff --git a/deps/v8/src/code-stub-assembler.cc b/deps/v8/src/code-stub-assembler.cc
index 2027d208ab..d3c81d0e81 100644
--- a/deps/v8/src/code-stub-assembler.cc
+++ b/deps/v8/src/code-stub-assembler.cc
@@ -1,7 +1,9 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+
#include "src/code-stub-assembler.h"
+
#include "src/code-factory.h"
#include "src/frames-inl.h"
#include "src/frames.h"
@@ -45,6 +47,23 @@ void CodeStubAssembler::HandleBreakOnNode() {
BreakOnNode(node_id);
}
+void CodeStubAssembler::Assert(const BranchGenerator& branch,
+ const char* message, const char* file, int line,
+ Node* extra_node1, const char* extra_node1_name,
+ Node* extra_node2, const char* extra_node2_name,
+ Node* extra_node3, const char* extra_node3_name,
+ Node* extra_node4, const char* extra_node4_name,
+ Node* extra_node5,
+ const char* extra_node5_name) {
+#if defined(DEBUG)
+ if (FLAG_debug_code) {
+ Check(branch, message, file, line, extra_node1, extra_node1_name,
+ extra_node2, extra_node2_name, extra_node3, extra_node3_name,
+ extra_node4, extra_node4_name, extra_node5, extra_node5_name);
+ }
+#endif
+}
+
void CodeStubAssembler::Assert(const NodeGenerator& condition_body,
const char* message, const char* file, int line,
Node* extra_node1, const char* extra_node1_name,
@@ -74,7 +93,7 @@ void MaybePrintNodeWithName(CodeStubAssembler* csa, Node* node,
} // namespace
#endif
-void CodeStubAssembler::Check(const NodeGenerator& condition_body,
+void CodeStubAssembler::Check(const BranchGenerator& branch,
const char* message, const char* file, int line,
Node* extra_node1, const char* extra_node1_name,
Node* extra_node2, const char* extra_node2_name,
@@ -88,9 +107,7 @@ void CodeStubAssembler::Check(const NodeGenerator& condition_body,
} else {
Comment("[ Assert");
}
- Node* condition = condition_body();
- DCHECK_NOT_NULL(condition);
- Branch(condition, &ok, &not_ok);
+ branch(&ok, &not_ok);
BIND(&not_ok);
DCHECK_NOT_NULL(message);
@@ -119,6 +136,24 @@ void CodeStubAssembler::Check(const NodeGenerator& condition_body,
Comment("] Assert");
}
+void CodeStubAssembler::Check(const NodeGenerator& condition_body,
+ const char* message, const char* file, int line,
+ Node* extra_node1, const char* extra_node1_name,
+ Node* extra_node2, const char* extra_node2_name,
+ Node* extra_node3, const char* extra_node3_name,
+ Node* extra_node4, const char* extra_node4_name,
+ Node* extra_node5, const char* extra_node5_name) {
+ BranchGenerator branch = [=](Label* ok, Label* not_ok) {
+ Node* condition = condition_body();
+ DCHECK_NOT_NULL(condition);
+ Branch(condition, ok, not_ok);
+ };
+
+ Check(branch, message, file, line, extra_node1, extra_node1_name, extra_node2,
+ extra_node2_name, extra_node3, extra_node3_name, extra_node4,
+ extra_node4_name, extra_node5, extra_node5_name);
+}
+
Node* CodeStubAssembler::Select(SloppyTNode<BoolT> condition,
const NodeGenerator& true_body,
const NodeGenerator& false_body,
@@ -503,7 +538,7 @@ Node* CodeStubAssembler::SmiShiftBitsConstant() {
return IntPtrConstant(kSmiShiftSize + kSmiTagSize);
}
-TNode<Smi> CodeStubAssembler::SmiFromWord32(SloppyTNode<Int32T> value) {
+TNode<Smi> CodeStubAssembler::SmiFromInt32(SloppyTNode<Int32T> value) {
TNode<IntPtrT> value_intptr = ChangeInt32ToIntPtr(value);
return BitcastWordToTaggedSigned(
WordShl(value_intptr, SmiShiftBitsConstant()));
@@ -526,13 +561,13 @@ TNode<IntPtrT> CodeStubAssembler::SmiUntag(SloppyTNode<Smi> value) {
WordSar(BitcastTaggedToWord(value), SmiShiftBitsConstant()));
}
-TNode<Int32T> CodeStubAssembler::SmiToWord32(SloppyTNode<Smi> value) {
+TNode<Int32T> CodeStubAssembler::SmiToInt32(SloppyTNode<Smi> value) {
TNode<IntPtrT> result = SmiUntag(value);
- return TruncateWordToWord32(result);
+ return TruncateIntPtrToInt32(result);
}
TNode<Float64T> CodeStubAssembler::SmiToFloat64(SloppyTNode<Smi> value) {
- return ChangeInt32ToFloat64(SmiToWord32(value));
+ return ChangeInt32ToFloat64(SmiToInt32(value));
}
TNode<Smi> CodeStubAssembler::SmiMax(SloppyTNode<Smi> a, SloppyTNode<Smi> b) {
@@ -581,6 +616,45 @@ TNode<Object> CodeStubAssembler::NumberMin(SloppyTNode<Object> a,
return TNode<Object>::UncheckedCast(result.value());
}
+TNode<IntPtrT> CodeStubAssembler::ConvertToRelativeIndex(
+ TNode<Context> context, TNode<Object> index, TNode<IntPtrT> length) {
+ TVARIABLE(IntPtrT, result);
+
+ TNode<Number> const index_int =
+ ToInteger_Inline(context, index, CodeStubAssembler::kTruncateMinusZero);
+ TNode<IntPtrT> zero = IntPtrConstant(0);
+
+ Label done(this);
+ Label if_issmi(this), if_isheapnumber(this, Label::kDeferred);
+ Branch(TaggedIsSmi(index_int), &if_issmi, &if_isheapnumber);
+
+ BIND(&if_issmi);
+ {
+ TNode<Smi> const index_smi = CAST(index_int);
+ result = Select<IntPtrT>(
+ IntPtrLessThan(SmiUntag(index_smi), zero),
+ [&] { return IntPtrMax(IntPtrAdd(length, SmiUntag(index_smi)), zero); },
+ [&] { return IntPtrMin(SmiUntag(index_smi), length); },
+ MachineType::PointerRepresentation());
+ Goto(&done);
+ }
+
+ BIND(&if_isheapnumber);
+ {
+ // If {index} is a heap number, it is definitely out of bounds. If it is
+    // negative, {index} = max(({length} + {index}), 0) = 0. If it is positive,
+ // set {index} to {length}.
+ TNode<HeapNumber> const index_hn = CAST(index_int);
+ TNode<Float64T> const float_zero = Float64Constant(0.);
+ TNode<Float64T> const index_float = LoadHeapNumberValue(index_hn);
+ result = SelectConstant(Float64LessThan(index_float, float_zero), zero,
+ length, MachineType::PointerRepresentation());
+ Goto(&done);
+ }
+ BIND(&done);
+ return result.value();
+}
+
Node* CodeStubAssembler::SmiMod(Node* a, Node* b) {
VARIABLE(var_result, MachineRepresentation::kTagged);
Label return_result(this, &var_result),
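
As an aside on the new ConvertToRelativeIndex helper above: it implements the usual relative-index clamping (negative indices count back from the end, and the result is clamped to [0, length]). A plain C++ sketch of the same semantics, illustrative only and not part of the patch:

#include <cstdint>

// Illustrative sketch: mirrors the Smi path (max/min clamping) and the
// HeapNumber path (any out-of-Smi-range index clamps to 0 or length).
intptr_t ConvertToRelativeIndexSketch(double index, intptr_t length) {
  if (index < 0) {
    double relative = static_cast<double>(length) + index;
    return relative > 0 ? static_cast<intptr_t>(relative) : 0;
  }
  return index < static_cast<double>(length) ? static_cast<intptr_t>(index)
                                             : length;
}
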
@@ -588,8 +662,8 @@ Node* CodeStubAssembler::SmiMod(Node* a, Node* b) {
return_nan(this, Label::kDeferred);
// Untag {a} and {b}.
- a = SmiToWord32(a);
- b = SmiToWord32(b);
+ a = SmiToInt32(a);
+ b = SmiToInt32(b);
// Return NaN if {b} is zero.
GotoIf(Word32Equal(b, Int32Constant(0)), &return_nan);
@@ -603,7 +677,7 @@ Node* CodeStubAssembler::SmiMod(Node* a, Node* b) {
{
// Fast case, don't need to check any other edge cases.
Node* r = Int32Mod(a, b);
- var_result.Bind(SmiFromWord32(r));
+ var_result.Bind(SmiFromInt32(r));
Goto(&return_result);
}
@@ -627,7 +701,7 @@ Node* CodeStubAssembler::SmiMod(Node* a, Node* b) {
GotoIf(Word32Equal(r, Int32Constant(0)), &return_minuszero);
// The remainder {r} can be outside the valid Smi range on 32bit
- // architectures, so we cannot just say SmiFromWord32(r) here.
+ // architectures, so we cannot just say SmiFromInt32(r) here.
var_result.Bind(ChangeInt32ToTagged(r));
Goto(&return_result);
}
@@ -652,8 +726,8 @@ TNode<Number> CodeStubAssembler::SmiMul(SloppyTNode<Smi> a,
Label return_result(this, &var_result);
// Both {a} and {b} are Smis. Convert them to integers and multiply.
- Node* lhs32 = SmiToWord32(a);
- Node* rhs32 = SmiToWord32(b);
+ Node* lhs32 = SmiToInt32(a);
+ Node* rhs32 = SmiToInt32(b);
Node* pair = Int32MulWithOverflow(lhs32, rhs32);
Node* overflow = Projection(1, pair);
@@ -702,7 +776,7 @@ TNode<Number> CodeStubAssembler::SmiMul(SloppyTNode<Smi> a,
}
BIND(&return_result);
- return var_result;
+ return var_result.value();
}
Node* CodeStubAssembler::TrySmiDiv(Node* dividend, Node* divisor,
@@ -724,8 +798,8 @@ Node* CodeStubAssembler::TrySmiDiv(Node* dividend, Node* divisor,
}
BIND(&dividend_is_not_zero);
- Node* untagged_divisor = SmiToWord32(divisor);
- Node* untagged_dividend = SmiToWord32(dividend);
+ Node* untagged_divisor = SmiToInt32(divisor);
+ Node* untagged_dividend = SmiToInt32(dividend);
// Do floating point division if {dividend} is kMinInt (or kMinInt - 1
// if the Smi size is 31) and {divisor} is -1.
@@ -749,10 +823,10 @@ Node* CodeStubAssembler::TrySmiDiv(Node* dividend, Node* divisor,
// Do floating point division if the remainder is not 0.
GotoIf(Word32NotEqual(untagged_dividend, truncated), bailout);
- return SmiFromWord32(untagged_result);
+ return SmiFromInt32(untagged_result);
}
-TNode<Int32T> CodeStubAssembler::TruncateWordToWord32(
+TNode<Int32T> CodeStubAssembler::TruncateIntPtrToInt32(
SloppyTNode<IntPtrT> value) {
if (Is64()) {
return TruncateInt64ToInt32(ReinterpretCast<Int64T>(value));
@@ -860,16 +934,44 @@ TNode<BoolT> CodeStubAssembler::IsFastJSArray(SloppyTNode<Object> object,
TVARIABLE(BoolT, var_result);
BIND(&if_true);
{
- var_result = ReinterpretCast<BoolT>(Int32Constant(1));
+ var_result = Int32TrueConstant();
+ Goto(&exit);
+ }
+ BIND(&if_false);
+ {
+ var_result = Int32FalseConstant();
Goto(&exit);
}
+ BIND(&exit);
+ return var_result.value();
+}
+
+TNode<BoolT> CodeStubAssembler::IsFastJSArrayWithNoCustomIteration(
+ TNode<Object> object, TNode<Context> context,
+ TNode<Context> native_context) {
+ Label if_false(this, Label::kDeferred), if_fast(this), exit(this);
+ GotoIfForceSlowPath(&if_false);
+ TVARIABLE(BoolT, var_result, Int32TrueConstant());
+ BranchIfFastJSArray(object, context, &if_fast, &if_false);
+ BIND(&if_fast);
+ {
+ // Check if the Array.prototype[@@iterator] may have changed.
+ GotoIfNot(InitialArrayPrototypeHasInitialArrayPrototypeMap(native_context),
+ &if_false);
+ // Check if array[@@iterator] may have changed.
+ GotoIfNot(HasInitialFastElementsKindMap(native_context, CAST(object)),
+ &if_false);
+ // Check if the array iterator has changed.
+ Branch(HasInitialArrayIteratorPrototypeMap(native_context), &exit,
+ &if_false);
+ }
BIND(&if_false);
{
- var_result = ReinterpretCast<BoolT>(Int32Constant(0));
+ var_result = Int32FalseConstant();
Goto(&exit);
}
BIND(&exit);
- return var_result;
+ return var_result.value();
}
void CodeStubAssembler::BranchIfFastJSArray(Node* object, Node* context,
@@ -1180,7 +1282,8 @@ TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
return ChangeInt32ToIntPtr(
LoadObjectField(object, offset, MachineType::Int32()));
} else {
- return SmiToWord(LoadObjectField(object, offset, MachineType::AnyTagged()));
+ return SmiToIntPtr(
+ LoadObjectField(object, offset, MachineType::AnyTagged()));
}
}
@@ -1193,7 +1296,7 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ObjectField(Node* object,
return UncheckedCast<Int32T>(
LoadObjectField(object, offset, MachineType::Int32()));
} else {
- return SmiToWord32(
+ return SmiToInt32(
LoadObjectField(object, offset, MachineType::AnyTagged()));
}
}
@@ -1206,7 +1309,7 @@ TNode<IntPtrT> CodeStubAssembler::LoadAndUntagSmi(Node* base, int index) {
return ChangeInt32ToIntPtr(
Load(MachineType::Int32(), base, IntPtrConstant(index)));
} else {
- return SmiToWord(
+ return SmiToIntPtr(
Load(MachineType::AnyTagged(), base, IntPtrConstant(index)));
}
}
@@ -1222,8 +1325,8 @@ Node* CodeStubAssembler::LoadAndUntagToWord32Root(
#endif
return Load(MachineType::Int32(), roots_array_start, IntPtrConstant(index));
} else {
- return SmiToWord32(Load(MachineType::AnyTagged(), roots_array_start,
- IntPtrConstant(index)));
+ return SmiToInt32(Load(MachineType::AnyTagged(), roots_array_start,
+ IntPtrConstant(index)));
}
}
@@ -1265,6 +1368,37 @@ Node* CodeStubAssembler::HasInstanceType(Node* object,
return InstanceTypeEqual(LoadInstanceType(object), instance_type);
}
+TNode<BoolT> CodeStubAssembler::HasInitialArrayIteratorPrototypeMap(
+ TNode<Context> native_context) {
+ CSA_ASSERT(this, IsNativeContext(native_context));
+ TNode<Map> arr_it_proto_map = LoadMap(CAST(LoadContextElement(
+ native_context, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX)));
+ TNode<Map> initial_map = CAST(LoadContextElement(
+ native_context, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
+ return WordEqual(arr_it_proto_map, initial_map);
+}
+
+TNode<BoolT>
+CodeStubAssembler::InitialArrayPrototypeHasInitialArrayPrototypeMap(
+ TNode<Context> native_context) {
+ CSA_ASSERT(this, IsNativeContext(native_context));
+ TNode<Map> proto_map = LoadMap(CAST(LoadContextElement(
+ native_context, Context::INITIAL_ARRAY_PROTOTYPE_INDEX)));
+ TNode<Map> initial_map = CAST(LoadContextElement(
+ native_context, Context::INITIAL_ARRAY_PROTOTYPE_MAP_INDEX));
+ return WordEqual(proto_map, initial_map);
+}
+
+TNode<BoolT> CodeStubAssembler::HasInitialFastElementsKindMap(
+ TNode<Context> native_context, TNode<JSArray> jsarray) {
+ CSA_ASSERT(this, IsNativeContext(native_context));
+ TNode<Map> map = LoadMap(jsarray);
+ TNode<Int32T> elements_kind = LoadMapElementsKind(map);
+ TNode<Map> initial_jsarray_element_map =
+ LoadJSArrayElementsMap(elements_kind, native_context);
+ return WordEqual(initial_jsarray_element_map, map);
+}
+
Node* CodeStubAssembler::DoesntHaveInstanceType(Node* object,
InstanceType instance_type) {
return Word32NotEqual(LoadInstanceType(object), Int32Constant(instance_type));
@@ -1302,7 +1436,7 @@ TNode<FixedArrayBase> CodeStubAssembler::LoadElements(
TNode<Object> CodeStubAssembler::LoadJSArrayLength(SloppyTNode<JSArray> array) {
CSA_ASSERT(this, IsJSArray(array));
- return CAST(LoadObjectField(array, JSArray::kLengthOffset));
+ return LoadObjectField(array, JSArray::kLengthOffset);
}
TNode<Smi> CodeStubAssembler::LoadFastJSArrayLength(
@@ -1361,7 +1495,7 @@ TNode<DescriptorArray> CodeStubAssembler::LoadMapDescriptors(
TNode<Object> CodeStubAssembler::LoadMapPrototype(SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
- return CAST(LoadObjectField(map, Map::kPrototypeOffset));
+ return LoadObjectField(map, Map::kPrototypeOffset);
}
TNode<PrototypeInfo> CodeStubAssembler::LoadMapPrototypeInfo(
@@ -1412,16 +1546,16 @@ TNode<Object> CodeStubAssembler::LoadMapConstructor(SloppyTNode<Map> map) {
Goto(&loop);
BIND(&loop);
{
- GotoIf(TaggedIsSmi(result), &done);
+ GotoIf(TaggedIsSmi(result.value()), &done);
Node* is_map_type =
- InstanceTypeEqual(LoadInstanceType(CAST(result)), MAP_TYPE);
+ InstanceTypeEqual(LoadInstanceType(CAST(result.value())), MAP_TYPE);
GotoIfNot(is_map_type, &done);
- result =
- LoadObjectField(CAST(result), Map::kConstructorOrBackPointerOffset);
+ result = LoadObjectField(CAST(result.value()),
+ Map::kConstructorOrBackPointerOffset);
Goto(&loop);
}
BIND(&done);
- return result;
+ return result.value();
}
Node* CodeStubAssembler::LoadMapEnumLength(SloppyTNode<Map> map) {
@@ -1487,11 +1621,11 @@ TNode<IntPtrT> CodeStubAssembler::LoadJSReceiverIdentityHash(
BIND(&done);
if (if_no_hash != nullptr) {
- GotoIf(
- IntPtrEqual(var_hash, IntPtrConstant(PropertyArray::kNoHashSentinel)),
- if_no_hash);
+ GotoIf(IntPtrEqual(var_hash.value(),
+ IntPtrConstant(PropertyArray::kNoHashSentinel)),
+ if_no_hash);
}
- return var_hash;
+ return var_hash.value();
}
TNode<Uint32T> CodeStubAssembler::LoadNameHashField(SloppyTNode<Name> name) {
@@ -1555,7 +1689,7 @@ Node* CodeStubAssembler::LoadFixedArrayElement(Node* object, Node* index_node,
int additional_offset,
ParameterMode parameter_mode) {
CSA_SLOW_ASSERT(this, IntPtrGreaterThanOrEqual(
- ParameterToWord(index_node, parameter_mode),
+ ParameterToIntPtr(index_node, parameter_mode),
IntPtrConstant(0)));
int32_t header_size =
FixedArray::kHeaderSize + additional_offset - kHeapObjectTag;
@@ -1564,64 +1698,194 @@ Node* CodeStubAssembler::LoadFixedArrayElement(Node* object, Node* index_node,
return Load(MachineType::AnyTagged(), object, offset);
}
-Node* CodeStubAssembler::LoadFixedTypedArrayElement(
+TNode<RawPtrT> CodeStubAssembler::LoadFixedTypedArrayBackingStore(
+ TNode<FixedTypedArrayBase> typed_array) {
+ // Backing store = external_pointer + base_pointer.
+ Node* external_pointer =
+ LoadObjectField(typed_array, FixedTypedArrayBase::kExternalPointerOffset,
+ MachineType::Pointer());
+ Node* base_pointer =
+ LoadObjectField(typed_array, FixedTypedArrayBase::kBasePointerOffset);
+ return UncheckedCast<RawPtrT>(
+ IntPtrAdd(external_pointer, BitcastTaggedToWord(base_pointer)));
+}
+
+Node* CodeStubAssembler::LoadFixedBigInt64ArrayElementAsTagged(
+ Node* data_pointer, Node* offset) {
+ TVARIABLE(BigInt, var_result);
+ Label done(this), if_zero(this);
+ if (Is64()) {
+ TNode<IntPtrT> value = UncheckedCast<IntPtrT>(
+ Load(MachineType::IntPtr(), data_pointer, offset));
+ Label if_positive(this), if_negative(this);
+ GotoIf(IntPtrEqual(value, IntPtrConstant(0)), &if_zero);
+ var_result = AllocateRawBigInt(IntPtrConstant(1));
+ Branch(IntPtrGreaterThan(value, IntPtrConstant(0)), &if_positive,
+ &if_negative);
+
+ BIND(&if_positive);
+ {
+ StoreBigIntBitfield(var_result.value(),
+ IntPtrConstant(BigInt::SignBits::encode(false) |
+ BigInt::LengthBits::encode(1)));
+ StoreBigIntDigit(var_result.value(), 0, Unsigned(value));
+ Goto(&done);
+ }
+
+ BIND(&if_negative);
+ {
+ StoreBigIntBitfield(var_result.value(),
+ IntPtrConstant(BigInt::SignBits::encode(true) |
+ BigInt::LengthBits::encode(1)));
+ StoreBigIntDigit(var_result.value(), 0,
+ Unsigned(IntPtrSub(IntPtrConstant(0), value)));
+ Goto(&done);
+ }
+ } else {
+ DCHECK(!Is64());
+ TVARIABLE(WordT, var_sign, IntPtrConstant(BigInt::SignBits::encode(false)));
+ TVARIABLE(IntPtrT, var_low);
+ TVARIABLE(IntPtrT, var_high);
+ var_low = UncheckedCast<IntPtrT>(
+ Load(MachineType::UintPtr(), data_pointer, offset));
+ var_high = UncheckedCast<IntPtrT>(
+ Load(MachineType::UintPtr(), data_pointer,
+ Int32Add(offset, Int32Constant(kPointerSize))));
+
+ Label high_zero(this), negative(this), allocate_one_digit(this),
+ allocate_two_digits(this);
+
+ GotoIf(WordEqual(var_high.value(), IntPtrConstant(0)), &high_zero);
+ Branch(IntPtrLessThan(var_high.value(), IntPtrConstant(0)), &negative,
+ &allocate_two_digits);
+
+ BIND(&high_zero);
+ Branch(WordEqual(var_low.value(), IntPtrConstant(0)), &if_zero,
+ &allocate_one_digit);
+
+ BIND(&negative);
+ {
+ var_sign = IntPtrConstant(BigInt::SignBits::encode(true));
+ // We must negate the value by computing "0 - (high|low)", performing
+ // both parts of the subtraction separately and manually taking care
+ // of the carry bit (which is 1 iff low != 0).
+ var_high = IntPtrSub(IntPtrConstant(0), var_high.value());
+ Label carry(this), no_carry(this);
+ Branch(WordEqual(var_low.value(), IntPtrConstant(0)), &no_carry, &carry);
+ BIND(&carry);
+ var_high = IntPtrSub(var_high.value(), IntPtrConstant(1));
+ Goto(&no_carry);
+ BIND(&no_carry);
+ var_low = IntPtrSub(IntPtrConstant(0), var_low.value());
+ // var_high was non-zero going into this block, but subtracting the
+ // carry bit from it could bring us back onto the "one digit" path.
+ Branch(WordEqual(var_high.value(), IntPtrConstant(0)),
+ &allocate_one_digit, &allocate_two_digits);
+ }
+
+ BIND(&allocate_one_digit);
+ {
+ var_result = AllocateRawBigInt(IntPtrConstant(1));
+ StoreBigIntBitfield(
+ var_result.value(),
+ WordOr(var_sign.value(),
+ IntPtrConstant(BigInt::LengthBits::encode(1))));
+ StoreBigIntDigit(var_result.value(), 0, Unsigned(var_low.value()));
+ Goto(&done);
+ }
+
+ BIND(&allocate_two_digits);
+ {
+ var_result = AllocateRawBigInt(IntPtrConstant(2));
+ StoreBigIntBitfield(
+ var_result.value(),
+ WordOr(var_sign.value(),
+ IntPtrConstant(BigInt::LengthBits::encode(2))));
+ StoreBigIntDigit(var_result.value(), 0, Unsigned(var_low.value()));
+ StoreBigIntDigit(var_result.value(), 1, Unsigned(var_high.value()));
+ Goto(&done);
+ }
+ }
+ BIND(&if_zero);
+ var_result = AllocateBigInt(IntPtrConstant(0));
+ Goto(&done);
+
+ BIND(&done);
+ return var_result.value();
+}
+
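The 32-bit path above negates a 64-bit two's-complement value that is split across two machine words, handling the borrow by hand. A standalone sketch of that arithmetic in plain C++ (the helper name and the uint32_t halves are illustrative, not CSA API):

#include <cassert>
#include <cstdint>

// Negate a signed 64-bit value given as two 32-bit halves, mirroring the
// manual carry handling in the 32-bit BigInt64 load path above.
void NegateTwoWords(uint32_t low, uint32_t high,
                    uint32_t* out_low, uint32_t* out_high) {
  // high half: 0 - high, then subtract the borrow (1 iff low != 0).
  uint32_t new_high = 0u - high;
  if (low != 0) new_high -= 1;
  // low half: 0 - low.
  *out_low = 0u - low;
  *out_high = new_high;
}

int main() {
  uint32_t lo, hi;
  // -1 is 0xFFFFFFFF'FFFFFFFF; negating it gives 1.
  NegateTwoWords(0xFFFFFFFFu, 0xFFFFFFFFu, &lo, &hi);
  assert(lo == 1u && hi == 0u);
  // 0x00000001'00000000 has low == 0, so no borrow is taken from the high word.
  NegateTwoWords(0u, 1u, &lo, &hi);
  assert(lo == 0u && hi == 0xFFFFFFFFu);
  return 0;
}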
+Node* CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged(
+ Node* data_pointer, Node* offset) {
+ TVARIABLE(BigInt, var_result);
+ Label if_zero(this), done(this);
+ if (Is64()) {
+ TNode<UintPtrT> value = UncheckedCast<UintPtrT>(
+ Load(MachineType::UintPtr(), data_pointer, offset));
+ GotoIf(IntPtrEqual(value, IntPtrConstant(0)), &if_zero);
+ var_result = AllocateBigInt(IntPtrConstant(1));
+ StoreBigIntDigit(var_result.value(), 0, value);
+ Goto(&done);
+ } else {
+ DCHECK(!Is64());
+ Label high_zero(this);
+
+ TNode<UintPtrT> low = UncheckedCast<UintPtrT>(
+ Load(MachineType::UintPtr(), data_pointer, offset));
+ TNode<UintPtrT> high = UncheckedCast<UintPtrT>(
+ Load(MachineType::UintPtr(), data_pointer,
+ Int32Add(offset, Int32Constant(kPointerSize))));
+
+ GotoIf(WordEqual(high, IntPtrConstant(0)), &high_zero);
+ var_result = AllocateBigInt(IntPtrConstant(2));
+ StoreBigIntDigit(var_result.value(), 0, low);
+ StoreBigIntDigit(var_result.value(), 1, high);
+ Goto(&done);
+
+ BIND(&high_zero);
+ GotoIf(WordEqual(low, IntPtrConstant(0)), &if_zero);
+ var_result = AllocateBigInt(IntPtrConstant(1));
+ StoreBigIntDigit(var_result.value(), 0, low);
+ Goto(&done);
+ }
+ BIND(&if_zero);
+ var_result = AllocateBigInt(IntPtrConstant(0));
+ Goto(&done);
+
+ BIND(&done);
+ return var_result.value();
+}
+
+Node* CodeStubAssembler::LoadFixedTypedArrayElementAsTagged(
Node* data_pointer, Node* index_node, ElementsKind elements_kind,
ParameterMode parameter_mode) {
Node* offset =
ElementOffsetFromIndex(index_node, elements_kind, parameter_mode, 0);
- MachineType type;
switch (elements_kind) {
case UINT8_ELEMENTS: /* fall through */
case UINT8_CLAMPED_ELEMENTS:
- type = MachineType::Uint8();
- break;
+ return SmiFromInt32(Load(MachineType::Uint8(), data_pointer, offset));
case INT8_ELEMENTS:
- type = MachineType::Int8();
- break;
+ return SmiFromInt32(Load(MachineType::Int8(), data_pointer, offset));
case UINT16_ELEMENTS:
- type = MachineType::Uint16();
- break;
+ return SmiFromInt32(Load(MachineType::Uint16(), data_pointer, offset));
case INT16_ELEMENTS:
- type = MachineType::Int16();
- break;
+ return SmiFromInt32(Load(MachineType::Int16(), data_pointer, offset));
case UINT32_ELEMENTS:
- type = MachineType::Uint32();
- break;
+ return ChangeUint32ToTagged(
+ Load(MachineType::Uint32(), data_pointer, offset));
case INT32_ELEMENTS:
- type = MachineType::Int32();
- break;
+ return ChangeInt32ToTagged(
+ Load(MachineType::Int32(), data_pointer, offset));
case FLOAT32_ELEMENTS:
- type = MachineType::Float32();
- break;
+ return AllocateHeapNumberWithValue(ChangeFloat32ToFloat64(
+ Load(MachineType::Float32(), data_pointer, offset)));
case FLOAT64_ELEMENTS:
- type = MachineType::Float64();
- break;
- default:
- UNREACHABLE();
- }
- return Load(type, data_pointer, offset);
-}
-
-Node* CodeStubAssembler::LoadFixedTypedArrayElementAsTagged(
- Node* data_pointer, Node* index_node, ElementsKind elements_kind,
- ParameterMode parameter_mode) {
- Node* value = LoadFixedTypedArrayElement(data_pointer, index_node,
- elements_kind, parameter_mode);
- switch (elements_kind) {
- case ElementsKind::INT8_ELEMENTS:
- case ElementsKind::UINT8_CLAMPED_ELEMENTS:
- case ElementsKind::UINT8_ELEMENTS:
- case ElementsKind::INT16_ELEMENTS:
- case ElementsKind::UINT16_ELEMENTS:
- return SmiFromWord32(value);
- case ElementsKind::INT32_ELEMENTS:
- return ChangeInt32ToTagged(value);
- case ElementsKind::UINT32_ELEMENTS:
- return ChangeUint32ToTagged(value);
- case ElementsKind::FLOAT32_ELEMENTS:
- return AllocateHeapNumberWithValue(ChangeFloat32ToFloat64(value));
- case ElementsKind::FLOAT64_ELEMENTS:
- return AllocateHeapNumberWithValue(value);
+ return AllocateHeapNumberWithValue(
+ Load(MachineType::Float64(), data_pointer, offset));
+ case BIGINT64_ELEMENTS:
+ return LoadFixedBigInt64ArrayElementAsTagged(data_pointer, offset);
+ case BIGUINT64_ELEMENTS:
+ return LoadFixedBigUint64ArrayElementAsTagged(data_pointer, offset);
default:
UNREACHABLE();
}
@@ -1656,7 +1920,7 @@ Node* CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
if (Is64()) {
return Load(MachineType::Int32(), object, offset);
} else {
- return SmiToWord32(Load(MachineType::AnyTagged(), object, offset));
+ return SmiToInt32(Load(MachineType::AnyTagged(), object, offset));
}
}
@@ -1995,8 +2259,8 @@ TNode<Smi> CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
VARIABLE(var_elements, MachineRepresentation::kTagged, LoadElements(array));
// Resize the capacity of the fixed array if it doesn't fit.
- TNode<IntPtrT> first = *arg_index;
- Node* growth = WordToParameter(
+ TNode<IntPtrT> first = arg_index->value();
+ Node* growth = IntPtrToParameter(
IntPtrSub(UncheckedCast<IntPtrT>(args->GetLength(INTPTR_PARAMETERS)),
first),
mode);
@@ -2028,12 +2292,12 @@ TNode<Smi> CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
var_tagged_length = length;
Node* diff = SmiSub(length, LoadFastJSArrayLength(array));
StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
- *arg_index = IntPtrAdd(*arg_index, SmiUntag(diff));
+ *arg_index = IntPtrAdd(arg_index->value(), SmiUntag(diff));
Goto(bailout);
}
BIND(&success);
- return var_tagged_length;
+ return var_tagged_length.value();
}
void CodeStubAssembler::TryStoreArrayElement(ElementsKind kind,
@@ -2121,11 +2385,55 @@ TNode<HeapNumber> CodeStubAssembler::AllocateHeapNumberWithValue(
return result;
}
-Node* CodeStubAssembler::AllocateSeqOneByteString(int length,
- AllocationFlags flags) {
+TNode<BigInt> CodeStubAssembler::AllocateBigInt(TNode<IntPtrT> length) {
+ TNode<BigInt> result = AllocateRawBigInt(length);
+ STATIC_ASSERT(BigInt::LengthBits::kShift == 0);
+ StoreBigIntBitfield(result, length);
+ return result;
+}
+
+TNode<BigInt> CodeStubAssembler::AllocateRawBigInt(TNode<IntPtrT> length) {
+ // This is currently used only for 64-bit wide BigInts. If more general
+ // applicability is required, a large-object check must be added.
+ CSA_ASSERT(this, UintPtrLessThan(length, IntPtrConstant(3)));
+
+ TNode<IntPtrT> size = IntPtrAdd(IntPtrConstant(BigInt::kHeaderSize),
+ Signed(WordShl(length, kPointerSizeLog2)));
+ Node* raw_result = Allocate(size, kNone);
+ StoreMapNoWriteBarrier(raw_result, Heap::kBigIntMapRootIndex);
+ return UncheckedCast<BigInt>(raw_result);
+}
+
+void CodeStubAssembler::StoreBigIntBitfield(TNode<BigInt> bigint,
+ TNode<WordT> bitfield) {
+ StoreObjectFieldNoWriteBarrier(bigint, BigInt::kBitfieldOffset, bitfield,
+ MachineType::PointerRepresentation());
+}
+
+void CodeStubAssembler::StoreBigIntDigit(TNode<BigInt> bigint, int digit_index,
+ TNode<UintPtrT> digit) {
+ StoreObjectFieldNoWriteBarrier(
+ bigint, BigInt::kDigitsOffset + digit_index * kPointerSize, digit,
+ UintPtrT::kMachineRepresentation);
+}
+
+TNode<WordT> CodeStubAssembler::LoadBigIntBitfield(TNode<BigInt> bigint) {
+ return UncheckedCast<WordT>(
+ LoadObjectField(bigint, BigInt::kBitfieldOffset, MachineType::UintPtr()));
+}
+
+TNode<UintPtrT> CodeStubAssembler::LoadBigIntDigit(TNode<BigInt> bigint,
+ int digit_index) {
+ return UncheckedCast<UintPtrT>(LoadObjectField(
+ bigint, BigInt::kDigitsOffset + digit_index * kPointerSize,
+ MachineType::UintPtr()));
+}
+
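The BigInt helpers above pack the sign and the digit count into a single word through V8's BitField templates; the STATIC_ASSERT relies on LengthBits occupying the lowest bits. A minimal encode/decode sketch of that pattern, with made-up field widths rather than the real ones from BigInt:

#include <cassert>
#include <cstdint>

// Illustrative layout only: digit count in the low 30 bits, sign in bit 30.
// The real widths come from BigInt::LengthBits / BigInt::SignBits.
constexpr uintptr_t kLengthShift = 0;
constexpr uintptr_t kLengthMask = (uintptr_t{1} << 30) - 1;
constexpr uintptr_t kSignShift = 30;

constexpr uintptr_t EncodeBitfield(bool sign, uintptr_t length) {
  return (length << kLengthShift) | (static_cast<uintptr_t>(sign) << kSignShift);
}
constexpr uintptr_t DecodeLength(uintptr_t bitfield) {
  return (bitfield >> kLengthShift) & kLengthMask;
}
constexpr bool DecodeSign(uintptr_t bitfield) {
  return ((bitfield >> kSignShift) & 1) != 0;
}

int main() {
  // A negative BigInt with one digit, as built by the BigInt64 loader above.
  uintptr_t bits = EncodeBitfield(/*sign=*/true, /*length=*/1);
  assert(DecodeLength(bits) == 1 && DecodeSign(bits));
  // AllocateBigInt(length) stores the length alone, i.e. the sign stays false.
  assert(!DecodeSign(EncodeBitfield(/*sign=*/false, /*length=*/2)));
  return 0;
}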
+TNode<String> CodeStubAssembler::AllocateSeqOneByteString(
+ int length, AllocationFlags flags) {
Comment("AllocateSeqOneByteString");
if (length == 0) {
- return LoadRoot(Heap::kempty_stringRootIndex);
+ return CAST(LoadRoot(Heap::kempty_stringRootIndex));
}
Node* result = Allocate(SeqOneByteString::SizeFor(length), flags);
DCHECK(Heap::RootIsImmortalImmovable(Heap::kOneByteStringMapRootIndex));
@@ -2136,7 +2444,7 @@ Node* CodeStubAssembler::AllocateSeqOneByteString(int length,
StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldSlot,
IntPtrConstant(String::kEmptyHashField),
MachineType::PointerRepresentation());
- return result;
+ return CAST(result);
}
Node* CodeStubAssembler::IsZeroOrFixedArray(Node* object) {
@@ -2153,9 +2461,8 @@ Node* CodeStubAssembler::IsZeroOrFixedArray(Node* object) {
return var_result.value();
}
-Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context,
- TNode<Smi> length,
- AllocationFlags flags) {
+TNode<String> CodeStubAssembler::AllocateSeqOneByteString(
+ Node* context, TNode<Smi> length, AllocationFlags flags) {
Comment("AllocateSeqOneByteString");
CSA_SLOW_ASSERT(this, IsZeroOrFixedArray(context));
VARIABLE(var_result, MachineRepresentation::kTagged);
@@ -2203,14 +2510,14 @@ Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context,
}
BIND(&if_join);
- return var_result.value();
+ return CAST(var_result.value());
}
-Node* CodeStubAssembler::AllocateSeqTwoByteString(int length,
- AllocationFlags flags) {
+TNode<String> CodeStubAssembler::AllocateSeqTwoByteString(
+ int length, AllocationFlags flags) {
Comment("AllocateSeqTwoByteString");
if (length == 0) {
- return LoadRoot(Heap::kempty_stringRootIndex);
+ return CAST(LoadRoot(Heap::kempty_stringRootIndex));
}
Node* result = Allocate(SeqTwoByteString::SizeFor(length), flags);
DCHECK(Heap::RootIsImmortalImmovable(Heap::kStringMapRootIndex));
@@ -2221,13 +2528,12 @@ Node* CodeStubAssembler::AllocateSeqTwoByteString(int length,
StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldSlot,
IntPtrConstant(String::kEmptyHashField),
MachineType::PointerRepresentation());
- return result;
+ return CAST(result);
}
-Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context,
- TNode<Smi> length,
- AllocationFlags flags) {
- CSA_SLOW_ASSERT(this, IsFixedArray(context));
+TNode<String> CodeStubAssembler::AllocateSeqTwoByteString(
+ Node* context, TNode<Smi> length, AllocationFlags flags) {
+ CSA_SLOW_ASSERT(this, IsZeroOrFixedArray(context));
Comment("AllocateSeqTwoByteString");
VARIABLE(var_result, MachineRepresentation::kTagged);
@@ -2274,14 +2580,14 @@ Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context,
}
BIND(&if_join);
- return var_result.value();
+ return CAST(var_result.value());
}
-Node* CodeStubAssembler::AllocateSlicedString(
- Heap::RootListIndex map_root_index, TNode<Smi> length, Node* parent,
- Node* offset) {
- CSA_ASSERT(this, IsString(parent));
- CSA_ASSERT(this, TaggedIsSmi(offset));
+TNode<String> CodeStubAssembler::AllocateSlicedString(
+ Heap::RootListIndex map_root_index, TNode<Smi> length, TNode<String> parent,
+ TNode<Smi> offset) {
+ DCHECK(map_root_index == Heap::kSlicedOneByteStringMapRootIndex ||
+ map_root_index == Heap::kSlicedStringMapRootIndex);
Node* result = Allocate(SlicedString::kSize);
DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
StoreMapNoWriteBarrier(result, map_root_index);
@@ -2294,29 +2600,26 @@ Node* CodeStubAssembler::AllocateSlicedString(
MachineRepresentation::kTagged);
StoreObjectFieldNoWriteBarrier(result, SlicedString::kOffsetOffset, offset,
MachineRepresentation::kTagged);
- return result;
+ return CAST(result);
}
-Node* CodeStubAssembler::AllocateSlicedOneByteString(TNode<Smi> length,
- Node* parent,
- Node* offset) {
+TNode<String> CodeStubAssembler::AllocateSlicedOneByteString(
+ TNode<Smi> length, TNode<String> parent, TNode<Smi> offset) {
return AllocateSlicedString(Heap::kSlicedOneByteStringMapRootIndex, length,
parent, offset);
}
-Node* CodeStubAssembler::AllocateSlicedTwoByteString(TNode<Smi> length,
- Node* parent,
- Node* offset) {
+TNode<String> CodeStubAssembler::AllocateSlicedTwoByteString(
+ TNode<Smi> length, TNode<String> parent, TNode<Smi> offset) {
return AllocateSlicedString(Heap::kSlicedStringMapRootIndex, length, parent,
offset);
}
-Node* CodeStubAssembler::AllocateConsString(Heap::RootListIndex map_root_index,
- TNode<Smi> length, Node* first,
- Node* second,
- AllocationFlags flags) {
- CSA_ASSERT(this, IsString(first));
- CSA_ASSERT(this, IsString(second));
+TNode<String> CodeStubAssembler::AllocateConsString(
+ Heap::RootListIndex map_root_index, TNode<Smi> length, TNode<String> first,
+ TNode<String> second, AllocationFlags flags) {
+ DCHECK(map_root_index == Heap::kConsOneByteStringMapRootIndex ||
+ map_root_index == Heap::kConsStringMapRootIndex);
Node* result = Allocate(ConsString::kSize, flags);
DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
StoreMapNoWriteBarrier(result, map_root_index);
@@ -2335,29 +2638,28 @@ Node* CodeStubAssembler::AllocateConsString(Heap::RootListIndex map_root_index,
StoreObjectField(result, ConsString::kFirstOffset, first);
StoreObjectField(result, ConsString::kSecondOffset, second);
}
- return result;
+ return CAST(result);
}
-Node* CodeStubAssembler::AllocateOneByteConsString(TNode<Smi> length,
- Node* first, Node* second,
- AllocationFlags flags) {
+TNode<String> CodeStubAssembler::AllocateOneByteConsString(
+ TNode<Smi> length, TNode<String> first, TNode<String> second,
+ AllocationFlags flags) {
return AllocateConsString(Heap::kConsOneByteStringMapRootIndex, length, first,
second, flags);
}
-Node* CodeStubAssembler::AllocateTwoByteConsString(TNode<Smi> length,
- Node* first, Node* second,
- AllocationFlags flags) {
+TNode<String> CodeStubAssembler::AllocateTwoByteConsString(
+ TNode<Smi> length, TNode<String> first, TNode<String> second,
+ AllocationFlags flags) {
return AllocateConsString(Heap::kConsStringMapRootIndex, length, first,
second, flags);
}
-Node* CodeStubAssembler::NewConsString(Node* context, TNode<Smi> length,
- Node* left, Node* right,
- AllocationFlags flags) {
+TNode<String> CodeStubAssembler::NewConsString(Node* context, TNode<Smi> length,
+ TNode<String> left,
+ TNode<String> right,
+ AllocationFlags flags) {
CSA_ASSERT(this, IsFixedArray(context));
- CSA_ASSERT(this, IsString(left));
- CSA_ASSERT(this, IsString(right));
// Added string can be a cons string.
Comment("Allocating ConsString");
Node* left_instance_type = LoadInstanceType(left);
@@ -2382,7 +2684,7 @@ Node* CodeStubAssembler::NewConsString(Node* context, TNode<Smi> length,
STATIC_ASSERT(kOneByteDataHintTag != 0);
Label one_byte_map(this);
Label two_byte_map(this);
- VARIABLE(result, MachineRepresentation::kTagged);
+ TVARIABLE(String, result);
Label done(this, &result);
GotoIf(IsSetWord32(anded_instance_types,
kStringEncodingMask | kOneByteDataHintTag),
@@ -2395,12 +2697,12 @@ Node* CodeStubAssembler::NewConsString(Node* context, TNode<Smi> length,
BIND(&one_byte_map);
Comment("One-byte ConsString");
- result.Bind(AllocateOneByteConsString(length, left, right, flags));
+ result = AllocateOneByteConsString(length, left, right, flags);
Goto(&done);
BIND(&two_byte_map);
Comment("Two-byte ConsString");
- result.Bind(AllocateTwoByteConsString(length, left, right, flags));
+ result = AllocateTwoByteConsString(length, left, right, flags);
Goto(&done);
BIND(&done);
@@ -2433,7 +2735,7 @@ Node* CodeStubAssembler::AllocateNameDictionaryWithCapacity(Node* capacity) {
DCHECK(Heap::RootIsImmortalImmovable(Heap::kNameDictionaryMapRootIndex));
StoreMapNoWriteBarrier(result, Heap::kNameDictionaryMapRootIndex);
StoreObjectFieldNoWriteBarrier(result, FixedArray::kLengthOffset,
- SmiFromWord(length));
+ SmiFromIntPtr(length));
// Initialized HashTable fields.
Node* zero = SmiConstant(0);
StoreFixedArrayElement(result, NameDictionary::kNumberOfElementsIndex, zero,
@@ -2825,6 +3127,7 @@ Node* CodeStubAssembler::AllocateFixedArray(ElementsKind kind,
ParameterMode mode,
AllocationFlags flags,
Node* fixed_array_map) {
+ Comment("AllocateFixedArray");
CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity_node, mode));
CSA_ASSERT(this, IntPtrOrSmiGreaterThan(capacity_node,
IntPtrOrSmiConstant(0, mode), mode));
@@ -2891,7 +3194,7 @@ Node* CodeStubAssembler::ExtractFixedArray(Node* fixed_array, Node* first,
Label if_fixed_double_array(this), empty(this), cow(this),
done(this, {&var_result, &var_fixed_array_map});
var_fixed_array_map.Bind(LoadMap(fixed_array));
- GotoIf(WordEqual(IntPtrOrSmiConstant(0, parameter_mode), count), &empty);
+ GotoIf(WordEqual(IntPtrOrSmiConstant(0, parameter_mode), capacity), &empty);
if (extract_flags & ExtractFixedArrayFlag::kFixedDoubleArrays) {
if (extract_flags & ExtractFixedArrayFlag::kFixedArrays) {
@@ -3568,7 +3871,7 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
GotoIf(TaggedIsNotSmi(value), &not_smi);
// {value} is a Smi.
- var_word32->Bind(SmiToWord32(value));
+ var_word32->Bind(SmiToInt32(value));
CombineFeedback(var_feedback, BinaryOperationFeedback::kSignedSmall);
Goto(if_number);
@@ -3671,7 +3974,7 @@ TNode<Number> CodeStubAssembler::ChangeFloat64ToTagged(
Goto(&if_join);
}
BIND(&if_join);
- return var_result;
+ return var_result.value();
}
TNode<Number> CodeStubAssembler::ChangeInt32ToTagged(
@@ -3700,7 +4003,7 @@ TNode<Number> CodeStubAssembler::ChangeInt32ToTagged(
}
Goto(&if_join);
BIND(&if_join);
- return var_result;
+ return var_result.value();
}
TNode<Number> CodeStubAssembler::ChangeUint32ToTagged(
@@ -3741,7 +4044,7 @@ TNode<Number> CodeStubAssembler::ChangeUint32ToTagged(
Goto(&if_join);
BIND(&if_join);
- return var_result;
+ return var_result.value();
}
TNode<String> CodeStubAssembler::ToThisString(Node* context, Node* value,
@@ -3773,9 +4076,8 @@ TNode<String> CodeStubAssembler::ToThisString(Node* context, Node* value,
BIND(&if_valueisnullorundefined);
{
// The {value} is either null or undefined.
- CallRuntime(Runtime::kThrowCalledOnNullOrUndefined, context,
- StringConstant(method_name));
- Unreachable();
+ ThrowTypeError(context, MessageTemplate::kCalledOnNullOrUndefined,
+ method_name);
}
}
}
@@ -3807,7 +4109,7 @@ TNode<Float64T> CodeStubAssembler::ChangeNumberToFloat64(
}
BIND(&done);
- return result;
+ return result.value();
}
TNode<UintPtrT> CodeStubAssembler::ChangeNonnegativeNumberToUintPtr(
@@ -3823,11 +4125,11 @@ TNode<UintPtrT> CodeStubAssembler::ChangeNonnegativeNumberToUintPtr(
BIND(&smi);
TNode<Smi> value_smi = CAST(value);
CSA_SLOW_ASSERT(this, SmiLessThan(SmiConstant(-1), value_smi));
- result = UncheckedCast<UintPtrT>(SmiToWord(value_smi));
+ result = UncheckedCast<UintPtrT>(SmiToIntPtr(value_smi));
Goto(&done);
BIND(&done);
- return result;
+ return result.value();
}
Node* CodeStubAssembler::TimesPointerSize(Node* value) {
@@ -3918,14 +4220,6 @@ Node* CodeStubAssembler::ToThisValue(Node* context, Node* value,
return var_value.value();
}
-void CodeStubAssembler::ThrowIncompatibleMethodReceiver(Node* context,
- const char* method_name,
- Node* receiver) {
- CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
- StringConstant(method_name), receiver);
- Unreachable();
-}
-
Node* CodeStubAssembler::ThrowIfNotInstanceType(Node* context, Node* value,
InstanceType instance_type,
char const* method_name) {
@@ -3943,7 +4237,8 @@ Node* CodeStubAssembler::ThrowIfNotInstanceType(Node* context, Node* value,
// The {value} is not a compatible receiver for this method.
BIND(&throw_exception);
- ThrowIncompatibleMethodReceiver(context, method_name, value);
+ ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
+ StringConstant(method_name), value);
BIND(&out);
return var_value_map.value();
@@ -4019,19 +4314,6 @@ Node* CodeStubAssembler::InstanceTypeEqual(Node* instance_type, int type) {
return Word32Equal(instance_type, Int32Constant(type));
}
-Node* CodeStubAssembler::IsSpecialReceiverMap(Node* map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
- Node* is_special = IsSpecialReceiverInstanceType(LoadMapInstanceType(map));
- uint32_t mask =
- Map::HasNamedInterceptorBit::kMask | Map::IsAccessCheckNeededBit::kMask;
- USE(mask);
- // Interceptors or access checks imply special receiver.
- CSA_ASSERT(this,
- SelectConstant(IsSetWord32(LoadMapBitField(map), mask), is_special,
- Int32Constant(1), MachineRepresentation::kWord32));
- return is_special;
-}
-
TNode<BoolT> CodeStubAssembler::IsDictionaryMap(SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
Node* bit_field3 = LoadMapBitField3(map);
@@ -4065,6 +4347,13 @@ Node* CodeStubAssembler::IsNoElementsProtectorCellInvalid() {
return WordEqual(cell_value, invalid);
}
+Node* CodeStubAssembler::IsPromiseThenProtectorCellInvalid() {
+ Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
+ Node* cell = LoadRoot(Heap::kPromiseThenProtectorRootIndex);
+ Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
+ return WordEqual(cell_value, invalid);
+}
+
Node* CodeStubAssembler::IsSpeciesProtectorCellInvalid() {
Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
Node* cell = LoadRoot(Heap::kSpeciesProtectorRootIndex);
@@ -4081,6 +4370,18 @@ Node* CodeStubAssembler::IsPrototypeInitialArrayPrototype(Node* context,
return WordEqual(proto, initial_array_prototype);
}
+TNode<BoolT> CodeStubAssembler::IsPrototypeTypedArrayPrototype(
+ SloppyTNode<Context> context, SloppyTNode<Map> map) {
+ TNode<Context> const native_context = LoadNativeContext(context);
+ TNode<Object> const typed_array_prototype =
+ LoadContextElement(native_context, Context::TYPED_ARRAY_PROTOTYPE_INDEX);
+ TNode<Object> proto = LoadMapPrototype(map);
+ TNode<Object> proto_of_proto = Select<Object>(
+ IsJSObject(proto), [=] { return LoadMapPrototype(LoadMap(CAST(proto))); },
+ [=] { return NullConstant(); }, MachineRepresentation::kTagged);
+ return WordEqual(proto_of_proto, typed_array_prototype);
+}
+
Node* CodeStubAssembler::IsCallable(Node* object) {
return IsCallableMap(LoadMap(object));
}
@@ -4089,6 +4390,10 @@ Node* CodeStubAssembler::IsCell(Node* object) {
return WordEqual(LoadMap(object), LoadRoot(Heap::kCellMapRootIndex));
}
+Node* CodeStubAssembler::IsCode(Node* object) {
+ return HasInstanceType(object, CODE_TYPE);
+}
+
Node* CodeStubAssembler::IsConstructorMap(Node* map) {
CSA_ASSERT(this, IsMap(map));
return IsSetWord32<Map::IsConstructorBit>(LoadMapBitField(map));
@@ -4203,6 +4508,15 @@ Node* CodeStubAssembler::IsJSObject(Node* object) {
return IsJSObjectMap(LoadMap(object));
}
+Node* CodeStubAssembler::IsJSPromiseMap(Node* map) {
+ CSA_ASSERT(this, IsMap(map));
+ return InstanceTypeEqual(LoadMapInstanceType(map), JS_PROMISE_TYPE);
+}
+
+Node* CodeStubAssembler::IsJSPromise(Node* object) {
+ return IsJSPromiseMap(LoadMap(object));
+}
+
Node* CodeStubAssembler::IsJSProxy(Node* object) {
return HasInstanceType(object, JS_PROXY_TYPE);
}
@@ -4237,6 +4551,10 @@ Node* CodeStubAssembler::IsJSArrayMap(Node* map) {
return IsJSArrayInstanceType(LoadMapInstanceType(map));
}
+Node* CodeStubAssembler::IsJSAsyncGeneratorObject(Node* object) {
+ return HasInstanceType(object, JS_ASYNC_GENERATOR_OBJECT_TYPE);
+}
+
Node* CodeStubAssembler::IsFixedArray(Node* object) {
return HasInstanceType(object, FIXED_ARRAY_TYPE);
}
@@ -4249,6 +4567,10 @@ Node* CodeStubAssembler::IsFixedArraySubclass(Node* object) {
Int32Constant(LAST_FIXED_ARRAY_TYPE)));
}
+Node* CodeStubAssembler::IsPromiseCapability(Node* object) {
+ return HasInstanceType(object, PROMISE_CAPABILITY_TYPE);
+}
+
Node* CodeStubAssembler::IsPropertyArray(Node* object) {
return HasInstanceType(object, PROPERTY_ARRAY_TYPE);
}
@@ -4324,6 +4646,10 @@ Node* CodeStubAssembler::IsMutableHeapNumber(Node* object) {
return IsMutableHeapNumberMap(LoadMap(object));
}
+Node* CodeStubAssembler::IsFeedbackCell(Node* object) {
+ return HasInstanceType(object, FEEDBACK_CELL_TYPE);
+}
+
Node* CodeStubAssembler::IsFeedbackVector(Node* object) {
return IsFeedbackVectorMap(LoadMap(object));
}
@@ -4364,7 +4690,7 @@ Node* CodeStubAssembler::IsPrivateSymbol(Node* object) {
[=] {
TNode<Symbol> symbol = CAST(object);
TNode<Int32T> flags =
- SmiToWord32(LoadObjectField<Smi>(symbol, Symbol::kFlagsOffset));
+ SmiToInt32(LoadObjectField<Smi>(symbol, Symbol::kFlagsOffset));
return IsSetWord32(flags, 1 << Symbol::kPrivateBit);
},
[=] { return Int32Constant(0); }, MachineRepresentation::kWord32);
@@ -4391,6 +4717,10 @@ Node* CodeStubAssembler::IsNumberDictionary(Node* object) {
LoadRoot(Heap::kNumberDictionaryMapRootIndex));
}
+Node* CodeStubAssembler::IsJSGeneratorObject(Node* object) {
+ return HasInstanceType(object, JS_GENERATOR_OBJECT_TYPE);
+}
+
Node* CodeStubAssembler::IsJSFunctionInstanceType(Node* instance_type) {
return InstanceTypeEqual(instance_type, JS_FUNCTION_TYPE);
}
@@ -4553,12 +4883,12 @@ TNode<Int32T> CodeStubAssembler::StringCharCodeAt(SloppyTNode<String> string,
{
Node* result = CallRuntime(Runtime::kStringCharCodeAt, NoContextConstant(),
string, SmiTag(index));
- var_result = SmiToWord32(result);
+ var_result = SmiToInt32(result);
Goto(&return_result);
}
BIND(&return_result);
- return var_result;
+ return var_result.value();
}
TNode<String> CodeStubAssembler::StringFromCharCode(TNode<Int32T> code) {
@@ -4623,11 +4953,11 @@ TNode<String> CodeStubAssembler::StringFromCharCode(TNode<Int32T> code) {
// given character range using CopyStringCharacters.
// |from_string| must be a sequential string.
// 0 <= |from_index| <= |from_index| + |character_count| < from_string.length.
-Node* CodeStubAssembler::AllocAndCopyStringCharacters(
- Node* context, Node* from, Node* from_instance_type,
- TNode<IntPtrT> from_index, TNode<Smi> character_count) {
+TNode<String> CodeStubAssembler::AllocAndCopyStringCharacters(
+ Node* from, Node* from_instance_type, TNode<IntPtrT> from_index,
+ TNode<Smi> character_count) {
Label end(this), one_byte_sequential(this), two_byte_sequential(this);
- Variable var_result(this, MachineRepresentation::kTagged);
+ TVARIABLE(String, var_result);
Branch(IsOneByteStringInstanceType(from_instance_type), &one_byte_sequential,
&two_byte_sequential);
@@ -4635,24 +4965,24 @@ Node* CodeStubAssembler::AllocAndCopyStringCharacters(
// The subject string is a sequential one-byte string.
BIND(&one_byte_sequential);
{
- Node* result = AllocateSeqOneByteString(context, character_count);
+ TNode<String> result =
+ AllocateSeqOneByteString(NoContextConstant(), character_count);
CopyStringCharacters(from, result, from_index, IntPtrConstant(0),
SmiUntag(character_count), String::ONE_BYTE_ENCODING,
String::ONE_BYTE_ENCODING);
- var_result.Bind(result);
-
+ var_result = result;
Goto(&end);
}
// The subject string is a sequential two-byte string.
BIND(&two_byte_sequential);
{
- Node* result = AllocateSeqTwoByteString(context, character_count);
+ TNode<String> result =
+ AllocateSeqTwoByteString(NoContextConstant(), character_count);
CopyStringCharacters(from, result, from_index, IntPtrConstant(0),
SmiUntag(character_count), String::TWO_BYTE_ENCODING,
String::TWO_BYTE_ENCODING);
- var_result.Bind(result);
-
+ var_result = result;
Goto(&end);
}
@@ -4660,50 +4990,34 @@ Node* CodeStubAssembler::AllocAndCopyStringCharacters(
return var_result.value();
}
-
-Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
- Node* to, SubStringFlags flags) {
- DCHECK(flags == SubStringFlags::NONE ||
- flags == SubStringFlags::FROM_TO_ARE_BOUNDED);
- VARIABLE(var_result, MachineRepresentation::kTagged);
+TNode<String> CodeStubAssembler::SubString(TNode<String> string,
+ TNode<IntPtrT> from,
+ TNode<IntPtrT> to) {
+ TVARIABLE(String, var_result);
ToDirectStringAssembler to_direct(state(), string);
Label end(this), runtime(this);
- // Make sure first argument is a string.
- CSA_ASSERT(this, TaggedIsNotSmi(string));
- CSA_ASSERT(this, IsString(string));
-
- // Make sure that both from and to are non-negative smis.
-
- if (flags == SubStringFlags::NONE) {
- GotoIfNot(TaggedIsPositiveSmi(from), &runtime);
- GotoIfNot(TaggedIsPositiveSmi(to), &runtime);
- } else {
- CSA_ASSERT(this, TaggedIsPositiveSmi(from));
- CSA_ASSERT(this, TaggedIsPositiveSmi(to));
- }
-
- TNode<Smi> const substr_length = SmiSub(to, from);
- TNode<Smi> const string_length = LoadStringLengthAsSmi(string);
+ TNode<IntPtrT> const substr_length = IntPtrSub(to, from);
+ TNode<IntPtrT> const string_length = LoadStringLengthAsWord(string);
// Begin dispatching based on substring length.
Label original_string_or_invalid_length(this);
- GotoIf(SmiAboveOrEqual(substr_length, string_length),
+ GotoIf(UintPtrGreaterThanOrEqual(substr_length, string_length),
&original_string_or_invalid_length);
// A real substring (substr_length < string_length).
Label single_char(this);
- GotoIf(SmiEqual(substr_length, SmiConstant(1)), &single_char);
+ GotoIf(IntPtrEqual(substr_length, IntPtrConstant(1)), &single_char);
// TODO(jgruber): Add an additional case for substring of length == 0?
// Deal with different string types: update the index if necessary
// and extract the underlying string.
- Node* const direct_string = to_direct.TryToDirect(&runtime);
- Node* const offset = SmiAdd(from, SmiTag(to_direct.offset()));
+ TNode<String> direct_string = to_direct.TryToDirect(&runtime);
+ TNode<IntPtrT> offset = IntPtrAdd(from, to_direct.offset());
Node* const instance_type = to_direct.instance_type();
// The subject string can only be external or sequential string of either
@@ -4714,7 +5028,8 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
Label next(this);
// Short slice. Copy instead of slicing.
- GotoIf(SmiLessThan(substr_length, SmiConstant(SlicedString::kMinLength)),
+ GotoIf(IntPtrLessThan(substr_length,
+ IntPtrConstant(SlicedString::kMinLength)),
&next);
// Allocate new sliced string.
@@ -4728,15 +5043,15 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
BIND(&one_byte_slice);
{
- var_result.Bind(
- AllocateSlicedOneByteString(substr_length, direct_string, offset));
+ var_result = AllocateSlicedOneByteString(SmiTag(substr_length),
+ direct_string, SmiTag(offset));
Goto(&end);
}
BIND(&two_byte_slice);
{
- var_result.Bind(
- AllocateSlicedTwoByteString(substr_length, direct_string, offset));
+ var_result = AllocateSlicedTwoByteString(SmiTag(substr_length),
+ direct_string, SmiTag(offset));
Goto(&end);
}
@@ -4747,9 +5062,8 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
// encoding at this point.
GotoIf(to_direct.is_external(), &external_string);
- var_result.Bind(
- AllocAndCopyStringCharacters(context, direct_string, instance_type,
- SmiUntag(offset), substr_length));
+ var_result = AllocAndCopyStringCharacters(direct_string, instance_type,
+ offset, SmiTag(substr_length));
Counters* counters = isolate()->counters();
IncrementCounter(counters->sub_string_native(), 1);
@@ -4762,9 +5076,8 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
{
Node* const fake_sequential_string = to_direct.PointerToString(&runtime);
- var_result.Bind(AllocAndCopyStringCharacters(
- context, fake_sequential_string, instance_type, SmiUntag(offset),
- substr_length));
+ var_result = AllocAndCopyStringCharacters(
+ fake_sequential_string, instance_type, offset, SmiTag(substr_length));
Counters* counters = isolate()->counters();
IncrementCounter(counters->sub_string_native(), 1);
@@ -4775,44 +5088,37 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
// Substrings of length 1 are generated through CharCodeAt and FromCharCode.
BIND(&single_char);
{
- TNode<Int32T> char_code = StringCharCodeAt(string, SmiUntag(from));
- var_result.Bind(StringFromCharCode(char_code));
+ TNode<Int32T> char_code = StringCharCodeAt(string, from);
+ var_result = StringFromCharCode(char_code);
Goto(&end);
}
BIND(&original_string_or_invalid_length);
{
- if (flags == SubStringFlags::NONE) {
- // Longer than original string's length or negative: unsafe arguments.
- GotoIf(SmiAbove(substr_length, string_length), &runtime);
- } else {
- // with flag SubStringFlags::FROM_TO_ARE_BOUNDED, the only way we can
- // get here is if substr_length is equal to string_length.
- CSA_ASSERT(this, SmiEqual(substr_length, string_length));
- }
+ CSA_ASSERT(this, IntPtrEqual(substr_length, string_length));
// Equal length - check if {from, to} == {0, str.length}.
- GotoIf(SmiAbove(from, SmiConstant(0)), &runtime);
+ GotoIf(UintPtrGreaterThan(from, IntPtrConstant(0)), &runtime);
// Return the original string (substr_length == string_length).
Counters* counters = isolate()->counters();
IncrementCounter(counters->sub_string_native(), 1);
- var_result.Bind(string);
+ var_result = string;
Goto(&end);
}
// Fall back to a runtime call.
BIND(&runtime);
{
- var_result.Bind(
- CallRuntime(Runtime::kSubString, context, string, from, to));
+ var_result =
+ CAST(CallRuntime(Runtime::kStringSubstring, NoContextConstant(), string,
+ SmiTag(from), SmiTag(to)));
Goto(&end);
}
BIND(&end);
- CSA_ASSERT(this, IsString(var_result.value()));
return var_result.value();
}
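The rewritten SubString keeps the same dispatch as before, now on untagged lengths: a full-length request returns the original string, a length-1 request goes through CharCodeAt/FromCharCode, short slices are copied, and longer slices become SlicedStrings, while non-direct inputs still fall back to the runtime. A sketch of that decision, assuming 0 <= from <= to <= length and an illustrative stand-in for SlicedString::kMinLength:

#include <cstddef>
#include <cstdio>

enum class SubStringStrategy { kReturnOriginal, kSingleChar, kCopy, kSlice, kRuntime };

// Stand-in for SlicedString::kMinLength; the real value comes from objects.h.
constexpr size_t kMinSliceLength = 13;

// Classifies a substring request the way CodeStubAssembler::SubString does,
// for already-validated 0 <= from <= to <= length.
SubStringStrategy ClassifySubString(size_t length, size_t from, size_t to) {
  size_t substr_length = to - from;
  if (substr_length >= length) {
    // Equal lengths: the substring is the whole string (from == 0, to == length).
    return from == 0 ? SubStringStrategy::kReturnOriginal : SubStringStrategy::kRuntime;
  }
  if (substr_length == 1) return SubStringStrategy::kSingleChar;      // CharCodeAt + FromCharCode
  if (substr_length < kMinSliceLength) return SubStringStrategy::kCopy;  // sequential copy
  return SubStringStrategy::kSlice;  // allocate a SlicedString pointing at the parent
}

int main() {
  std::printf("%d\n", static_cast<int>(ClassifySubString(20, 0, 20)));  // kReturnOriginal
  std::printf("%d\n", static_cast<int>(ClassifySubString(20, 3, 4)));   // kSingleChar
  std::printf("%d\n", static_cast<int>(ClassifySubString(20, 3, 8)));   // kCopy
  std::printf("%d\n", static_cast<int>(ClassifySubString(20, 3, 19)));  // kSlice
  return 0;
}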
@@ -4833,7 +5139,7 @@ ToDirectStringAssembler::ToDirectStringAssembler(
var_is_external_.Bind(Int32Constant(0));
}
-Node* ToDirectStringAssembler::TryToDirect(Label* if_bailout) {
+TNode<String> ToDirectStringAssembler::TryToDirect(Label* if_bailout) {
VariableList vars({&var_string_, &var_offset_, &var_instance_type_}, zone());
Label dispatch(this, vars);
Label if_iscons(this);
@@ -4916,7 +5222,7 @@ Node* ToDirectStringAssembler::TryToDirect(Label* if_bailout) {
Goto(&out);
BIND(&out);
- return var_string_.value();
+ return CAST(var_string_.value());
}
Node* ToDirectStringAssembler::TryToSequential(StringPointerKind ptr_kind,
@@ -5034,22 +5340,23 @@ void CodeStubAssembler::MaybeDerefIndirectStrings(Variable* var_left,
// Fall through if neither string was an indirect string.
}
-Node* CodeStubAssembler::StringAdd(Node* context, Node* left, Node* right,
- AllocationFlags flags) {
- VARIABLE(result, MachineRepresentation::kTagged);
+TNode<String> CodeStubAssembler::StringAdd(Node* context, TNode<String> left,
+ TNode<String> right,
+ AllocationFlags flags) {
+ TVARIABLE(String, result);
Label check_right(this), runtime(this, Label::kDeferred), cons(this),
done(this, &result), done_native(this, &result);
Counters* counters = isolate()->counters();
TNode<Smi> left_length = LoadStringLengthAsSmi(left);
GotoIf(SmiNotEqual(SmiConstant(0), left_length), &check_right);
- result.Bind(right);
+ result = right;
Goto(&done_native);
BIND(&check_right);
TNode<Smi> right_length = LoadStringLengthAsSmi(right);
GotoIf(SmiNotEqual(SmiConstant(0), right_length), &cons);
- result.Bind(left);
+ result = left;
Goto(&done_native);
BIND(&cons);
@@ -5062,16 +5369,16 @@ Node* CodeStubAssembler::StringAdd(Node* context, Node* left, Node* right,
GotoIf(SmiGreaterThan(new_length, SmiConstant(String::kMaxLength)),
&runtime);
- VARIABLE(var_left, MachineRepresentation::kTagged, left);
- VARIABLE(var_right, MachineRepresentation::kTagged, right);
+ TVARIABLE(String, var_left, left);
+ TVARIABLE(String, var_right, right);
Variable* input_vars[2] = {&var_left, &var_right};
Label non_cons(this, 2, input_vars);
Label slow(this, Label::kDeferred);
GotoIf(SmiLessThan(new_length, SmiConstant(ConsString::kMinLength)),
&non_cons);
- result.Bind(NewConsString(context, new_length, var_left.value(),
- var_right.value(), flags));
+ result = NewConsString(context, new_length, var_left.value(),
+ var_right.value(), flags);
Goto(&done_native);
BIND(&non_cons);
@@ -5099,29 +5406,27 @@ Node* CodeStubAssembler::StringAdd(Node* context, Node* left, Node* right,
Int32Constant(kTwoByteStringTag)),
&two_byte);
// One-byte sequential string case
- Node* new_string = AllocateSeqOneByteString(context, new_length);
- CopyStringCharacters(var_left.value(), new_string, IntPtrConstant(0),
+ result = AllocateSeqOneByteString(context, new_length);
+ CopyStringCharacters(var_left.value(), result.value(), IntPtrConstant(0),
IntPtrConstant(0), word_left_length,
String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING);
- CopyStringCharacters(var_right.value(), new_string, IntPtrConstant(0),
+ CopyStringCharacters(var_right.value(), result.value(), IntPtrConstant(0),
word_left_length, word_right_length,
String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING);
- result.Bind(new_string);
Goto(&done_native);
BIND(&two_byte);
{
// Two-byte sequential string case
- new_string = AllocateSeqTwoByteString(context, new_length);
- CopyStringCharacters(var_left.value(), new_string, IntPtrConstant(0),
+ result = AllocateSeqTwoByteString(context, new_length);
+ CopyStringCharacters(var_left.value(), result.value(), IntPtrConstant(0),
IntPtrConstant(0), word_left_length,
String::TWO_BYTE_ENCODING,
String::TWO_BYTE_ENCODING);
- CopyStringCharacters(var_right.value(), new_string, IntPtrConstant(0),
+ CopyStringCharacters(var_right.value(), result.value(), IntPtrConstant(0),
word_left_length, word_right_length,
String::TWO_BYTE_ENCODING,
String::TWO_BYTE_ENCODING);
- result.Bind(new_string);
Goto(&done_native);
}
@@ -5135,7 +5440,7 @@ Node* CodeStubAssembler::StringAdd(Node* context, Node* left, Node* right,
}
BIND(&runtime);
{
- result.Bind(CallRuntime(Runtime::kStringAdd, context, left, right));
+ result = CAST(CallRuntime(Runtime::kStringAdd, context, left, right));
Goto(&done);
}
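StringAdd's fast path is unchanged in shape: an empty operand returns the other string, a result longer than String::kMaxLength goes to the runtime, a short result is copied into a fresh sequential string, and everything else becomes a ConsString. A sketch of that dispatch with stand-in constants (the real thresholds are ConsString::kMinLength and String::kMaxLength):

#include <cstddef>
#include <cstdio>

enum class StringAddStrategy { kReturnRight, kReturnLeft, kSequentialCopy, kConsString, kRuntime };

// Stand-in for ConsString::kMinLength; the real value comes from objects.h.
constexpr size_t kMinConsLength = 13;

StringAddStrategy ClassifyStringAdd(size_t left, size_t right, size_t max_length) {
  if (left == 0) return StringAddStrategy::kReturnRight;
  if (right == 0) return StringAddStrategy::kReturnLeft;
  size_t new_length = left + right;
  if (new_length > max_length) return StringAddStrategy::kRuntime;         // Runtime::kStringAdd
  if (new_length < kMinConsLength) return StringAddStrategy::kSequentialCopy;
  return StringAddStrategy::kConsString;                                    // NewConsString
}

int main() {
  constexpr size_t kMax = (size_t{1} << 28) - 16;  // illustrative stand-in for String::kMaxLength
  std::printf("%d\n", static_cast<int>(ClassifyStringAdd(0, 5, kMax)));     // kReturnRight
  std::printf("%d\n", static_cast<int>(ClassifyStringAdd(4, 5, kMax)));     // kSequentialCopy
  std::printf("%d\n", static_cast<int>(ClassifyStringAdd(40, 5, kMax)));    // kConsString
  std::printf("%d\n", static_cast<int>(ClassifyStringAdd(kMax, 5, kMax)));  // kRuntime
  return 0;
}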
@@ -5200,8 +5505,7 @@ TNode<String> CodeStubAssembler::StringFromCodePoint(TNode<Int32T> codepoint,
return CAST(var_result.value());
}
-TNode<Number> CodeStubAssembler::StringToNumber(SloppyTNode<String> input) {
- CSA_SLOW_ASSERT(this, IsString(input));
+TNode<Number> CodeStubAssembler::StringToNumber(TNode<String> input) {
Label runtime(this, Label::kDeferred);
Label end(this);
@@ -5224,11 +5528,11 @@ TNode<Number> CodeStubAssembler::StringToNumber(SloppyTNode<String> input) {
}
BIND(&end);
- return var_result;
+ return var_result.value();
}
-Node* CodeStubAssembler::NumberToString(Node* argument) {
- VARIABLE(result, MachineRepresentation::kTagged);
+TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
+ TVARIABLE(String, result);
Label runtime(this, Label::kDeferred), smi(this), done(this, &result);
// Load the number string cache.
@@ -5239,23 +5543,22 @@ Node* CodeStubAssembler::NumberToString(Node* argument) {
// TODO(ishell): cleanup mask handling.
Node* mask =
BitcastTaggedToWord(LoadFixedArrayBaseLength(number_string_cache));
- Node* one = IntPtrConstant(1);
+ TNode<IntPtrT> one = IntPtrConstant(1);
mask = IntPtrSub(mask, one);
- GotoIf(TaggedIsSmi(argument), &smi);
+ GotoIf(TaggedIsSmi(input), &smi);
- // Argument isn't smi, check to see if it's a heap-number.
- GotoIfNot(IsHeapNumber(argument), &runtime);
+ TNode<HeapNumber> heap_number_input = CAST(input);
// Make a hash from the two 32-bit values of the double.
- Node* low =
- LoadObjectField(argument, HeapNumber::kValueOffset, MachineType::Int32());
- Node* high = LoadObjectField(argument, HeapNumber::kValueOffset + kIntSize,
- MachineType::Int32());
- Node* hash = Word32Xor(low, high);
- hash = ChangeInt32ToIntPtr(hash);
- hash = WordShl(hash, one);
- Node* index = WordAnd(hash, WordSar(mask, SmiShiftBitsConstant()));
+ TNode<Int32T> low =
+ LoadObjectField<Int32T>(heap_number_input, HeapNumber::kValueOffset);
+ TNode<Int32T> high = LoadObjectField<Int32T>(
+ heap_number_input, HeapNumber::kValueOffset + kIntSize);
+ TNode<Word32T> hash = Word32Xor(low, high);
+ TNode<WordT> word_hash = WordShl(ChangeInt32ToIntPtr(hash), one);
+ TNode<WordT> index =
+ WordAnd(word_hash, WordSar(mask, SmiShiftBitsConstant()));
// Cache entry's key must be a heap number
Node* number_key = LoadFixedArrayElement(number_string_cache, index);
@@ -5272,14 +5575,15 @@ Node* CodeStubAssembler::NumberToString(Node* argument) {
// Heap number match, return value from cache entry.
IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
- result.Bind(LoadFixedArrayElement(number_string_cache, index, kPointerSize));
+ result =
+ CAST(LoadFixedArrayElement(number_string_cache, index, kPointerSize));
Goto(&done);
BIND(&runtime);
{
// No cache entry, go to the runtime.
- result.Bind(CallRuntime(Runtime::kNumberToStringSkipCache,
- NoContextConstant(), argument));
+ result = CAST(CallRuntime(Runtime::kNumberToStringSkipCache,
+ NoContextConstant(), input));
}
Goto(&done);
@@ -5287,20 +5591,19 @@ Node* CodeStubAssembler::NumberToString(Node* argument) {
{
// Load the smi key, make sure it matches the smi we're looking for.
Node* smi_index = BitcastWordToTagged(
- WordAnd(WordShl(BitcastTaggedToWord(argument), one), mask));
+ WordAnd(WordShl(BitcastTaggedToWord(input), one), mask));
Node* smi_key = LoadFixedArrayElement(number_string_cache, smi_index, 0,
SMI_PARAMETERS);
- GotoIf(WordNotEqual(smi_key, argument), &runtime);
+ GotoIf(WordNotEqual(smi_key, input), &runtime);
// Smi match, return value from cache entry.
IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
- result.Bind(LoadFixedArrayElement(number_string_cache, smi_index,
- kPointerSize, SMI_PARAMETERS));
+ result = CAST(LoadFixedArrayElement(number_string_cache, smi_index,
+ kPointerSize, SMI_PARAMETERS));
Goto(&done);
}
BIND(&done);
- CSA_ASSERT(this, IsString(result.value()));
return result.value();
}
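The typed NumberToString probes the number-string cache the same way the scalar runtime does: a double is hashed by XOR-ing its two 32-bit halves, and the masked hash selects a (key, value) pair in the cache. A plain C++ sketch, with an illustrative cache size:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Illustrative power-of-two entry count; the real size comes from the
// number_string_cache FixedArray, whose length is twice the entry count.
constexpr uint32_t kCacheEntries = 128;

uint32_t NumberStringCacheSlot(double number) {
  uint64_t bits;
  std::memcpy(&bits, &number, sizeof(bits));
  uint32_t low = static_cast<uint32_t>(bits);
  uint32_t high = static_cast<uint32_t>(bits >> 32);
  uint32_t hash = low ^ high;
  // The key lives at index 2 * slot, the cached string at 2 * slot + 1.
  return hash & (kCacheEntries - 1);
}

int main() {
  std::printf("slot for 1.5 -> %u\n", NumberStringCacheSlot(1.5));
  std::printf("slot for 2.5 -> %u\n", NumberStringCacheSlot(2.5));
  return 0;
}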
@@ -5382,7 +5685,8 @@ Node* CodeStubAssembler::NonNumberToNumberOrNumeric(
BIND(&if_inputisstring);
{
// The {input} is a String, use the fast stub to convert it to a Number.
- var_result.Bind(StringToNumber(input));
+ TNode<String> string_input = CAST(input);
+ var_result.Bind(StringToNumber(string_input));
Goto(&end);
}
@@ -5479,6 +5783,31 @@ TNode<Numeric> CodeStubAssembler::NonNumberToNumeric(
return UncheckedCast<Numeric>(result);
}
+TNode<Number> CodeStubAssembler::ToNumber_Inline(SloppyTNode<Context> context,
+ SloppyTNode<Object> input) {
+ TVARIABLE(Number, var_result);
+ Label end(this), not_smi(this, Label::kDeferred);
+
+ GotoIfNot(TaggedIsSmi(input), &not_smi);
+ var_result = CAST(input);
+ Goto(&end);
+
+ BIND(&not_smi);
+ {
+ var_result =
+ Select<Number>(IsHeapNumber(input), [=] { return CAST(input); },
+ [=] {
+ return CAST(CallBuiltin(Builtins::kNonNumberToNumber,
+ context, input));
+ },
+ MachineRepresentation::kTagged);
+ Goto(&end);
+ }
+
+ BIND(&end);
+ return var_result.value();
+}
+
TNode<Number> CodeStubAssembler::ToNumber(SloppyTNode<Context> context,
SloppyTNode<Object> input,
BigIntHandling bigint_handling) {
@@ -5509,7 +5838,28 @@ TNode<Number> CodeStubAssembler::ToNumber(SloppyTNode<Context> context,
}
BIND(&end);
- return var_result;
+ return var_result.value();
+}
+
+TNode<BigInt> CodeStubAssembler::ToBigInt(SloppyTNode<Context> context,
+ SloppyTNode<Object> input) {
+ TVARIABLE(BigInt, var_result);
+ Label if_bigint(this), done(this), if_throw(this);
+
+ GotoIf(TaggedIsSmi(input), &if_throw);
+ GotoIf(IsBigInt(input), &if_bigint);
+ var_result = CAST(CallRuntime(Runtime::kToBigInt, context, input));
+ Goto(&done);
+
+ BIND(&if_bigint);
+ var_result = CAST(input);
+ Goto(&done);
+
+ BIND(&if_throw);
+ ThrowTypeError(context, MessageTemplate::kBigIntFromObject, input);
+
+ BIND(&done);
+ return var_result.value();
}
void CodeStubAssembler::TaggedToNumeric(Node* context, Node* value, Label* done,
@@ -5594,7 +5944,7 @@ TNode<Number> CodeStubAssembler::ToUint32(SloppyTNode<Context> context,
BIND(&if_isnegativesmi);
{
- Node* const uint32_value = SmiToWord32(number);
+ Node* const uint32_value = SmiToInt32(number);
Node* float64_value = ChangeUint32ToFloat64(uint32_value);
var_result.Bind(AllocateHeapNumberWithValue(float64_value));
Goto(&out);
@@ -5680,7 +6030,8 @@ TNode<String> CodeStubAssembler::ToString(SloppyTNode<Context> context,
Branch(IsHeapNumberMap(input_map), &is_number, &not_heap_number);
BIND(&is_number);
- result.Bind(NumberToString(input));
+ TNode<Number> number_input = CAST(input);
+ result.Bind(NumberToString(number_input));
Goto(&done);
BIND(&not_heap_number);
@@ -5741,59 +6092,78 @@ Node* CodeStubAssembler::JSReceiverToPrimitive(Node* context, Node* input) {
return result.value();
}
-Node* CodeStubAssembler::ToSmiIndex(Node* const input, Node* const context,
- Label* range_error) {
- VARIABLE(result, MachineRepresentation::kTagged, input);
+TNode<JSReceiver> CodeStubAssembler::ToObject(SloppyTNode<Context> context,
+ SloppyTNode<Object> input) {
+ return CAST(CallBuiltin(Builtins::kToObject, context, input));
+}
+
+TNode<Smi> CodeStubAssembler::ToSmiIndex(TNode<Object> input,
+ TNode<Context> context,
+ Label* range_error) {
+ TVARIABLE(Smi, result);
Label check_undefined(this), return_zero(this), defined(this),
negative_check(this), done(this);
- Branch(TaggedIsSmi(result.value()), &negative_check, &check_undefined);
+
+ GotoIfNot(TaggedIsSmi(input), &check_undefined);
+ result = CAST(input);
+ Goto(&negative_check);
BIND(&check_undefined);
- Branch(IsUndefined(result.value()), &return_zero, &defined);
+ Branch(IsUndefined(input), &return_zero, &defined);
BIND(&defined);
- result.Bind(ToInteger_Inline(CAST(context), CAST(result.value()),
- CodeStubAssembler::kTruncateMinusZero));
- GotoIfNot(TaggedIsSmi(result.value()), range_error);
- CSA_ASSERT(this, TaggedIsSmi(result.value()));
+ TNode<Number> integer_input =
+ CAST(CallBuiltin(Builtins::kToInteger_TruncateMinusZero, context, input));
+ GotoIfNot(TaggedIsSmi(integer_input), range_error);
+ result = CAST(integer_input);
Goto(&negative_check);
BIND(&negative_check);
Branch(SmiLessThan(result.value(), SmiConstant(0)), range_error, &done);
BIND(&return_zero);
- result.Bind(SmiConstant(0));
+ result = SmiConstant(0);
Goto(&done);
BIND(&done);
- CSA_SLOW_ASSERT(this, TaggedIsSmi(result.value()));
return result.value();
}
-Node* CodeStubAssembler::ToSmiLength(Node* input, Node* const context,
- Label* range_error) {
- VARIABLE(result, MachineRepresentation::kTagged, input);
- Label to_integer(this), negative_check(this), return_zero(this), done(this);
- Branch(TaggedIsSmi(result.value()), &negative_check, &to_integer);
+TNode<Smi> CodeStubAssembler::ToSmiLength(TNode<Object> input,
+ TNode<Context> context,
+ Label* range_error) {
+ TVARIABLE(Smi, result);
+ Label to_integer(this), negative_check(this),
+ heap_number_negative_check(this), return_zero(this), done(this);
+
+ GotoIfNot(TaggedIsSmi(input), &to_integer);
+ result = CAST(input);
+ Goto(&negative_check);
BIND(&to_integer);
- result.Bind(ToInteger_Inline(CAST(context), CAST(result.value()),
- CodeStubAssembler::kTruncateMinusZero));
- GotoIf(TaggedIsSmi(result.value()), &negative_check);
- // result.value() can still be a negative HeapNumber here.
- Branch(IsTrue(CallBuiltin(Builtins::kLessThan, context, result.value(),
- SmiConstant(0))),
- &return_zero, range_error);
+ {
+ TNode<Number> integer_input = CAST(
+ CallBuiltin(Builtins::kToInteger_TruncateMinusZero, context, input));
+ GotoIfNot(TaggedIsSmi(integer_input), &heap_number_negative_check);
+ result = CAST(integer_input);
+ Goto(&negative_check);
+
+ // integer_input can still be a negative HeapNumber here.
+ BIND(&heap_number_negative_check);
+ TNode<HeapNumber> heap_number_input = CAST(integer_input);
+ Branch(IsTrue(CallBuiltin(Builtins::kLessThan, context, heap_number_input,
+ SmiConstant(0))),
+ &return_zero, range_error);
+ }
BIND(&negative_check);
Branch(SmiLessThan(result.value(), SmiConstant(0)), &return_zero, &done);
BIND(&return_zero);
- result.Bind(SmiConstant(0));
+ result = SmiConstant(0);
Goto(&done);
BIND(&done);
- CSA_SLOW_ASSERT(this, TaggedIsSmi(result.value()));
return result.value();
}
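ToSmiLength's behaviour is easier to see at the scalar level: after ToInteger (with -0 truncated to +0), negative values clamp to zero and positive values that do not fit in a Smi take the range-error exit. A sketch assuming the input has already been converted to a number, with an illustrative Smi maximum:

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <optional>

// Stand-in for Smi::kMaxValue on 64-bit targets; the real bound is platform dependent.
constexpr int64_t kSmiMax = (int64_t{1} << 31) - 1;

// Returns the clamped length, or std::nullopt for the range-error exit.
std::optional<int64_t> ToSmiLength(double input) {
  double integer = std::trunc(input);                 // ToInteger with -0 truncated
  if (integer < 0) return 0;                          // negative lengths become zero
  if (integer > static_cast<double>(kSmiMax)) return std::nullopt;  // range error
  return static_cast<int64_t>(integer);
}

int main() {
  std::printf("%lld\n", static_cast<long long>(*ToSmiLength(4.7)));   // 4
  std::printf("%lld\n", static_cast<long long>(*ToSmiLength(-3.0)));  // 0
  std::printf("%d\n", ToSmiLength(1e12).has_value());                 // 0 (range error)
  return 0;
}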
@@ -5829,7 +6199,7 @@ TNode<Number> CodeStubAssembler::ToInteger(SloppyTNode<Context> context,
Label return_zero(this, Label::kDeferred);
// Load the current {arg} value.
- TNode<Object> arg = var_arg;
+ TNode<Object> arg = var_arg.value();
// Check if {arg} is a Smi.
GotoIf(TaggedIsSmi(arg), &out);
@@ -5874,8 +6244,9 @@ TNode<Number> CodeStubAssembler::ToInteger(SloppyTNode<Context> context,
}
BIND(&out);
- if (mode == kTruncateMinusZero) CSA_ASSERT(this, IsNumberNormalized(var_arg));
- return CAST(var_arg);
+ if (mode == kTruncateMinusZero)
+ CSA_ASSERT(this, IsNumberNormalized(var_arg.value()));
+ return CAST(var_arg.value());
}
TNode<Uint32T> CodeStubAssembler::DecodeWord32(SloppyTNode<Word32T> word32,
@@ -6176,7 +6547,7 @@ Node* CodeStubAssembler::ComputeIntegerHash(Node* key) {
Node* CodeStubAssembler::ComputeIntegerHash(Node* key, Node* seed) {
// See v8::internal::ComputeIntegerHash()
- Node* hash = TruncateWordToWord32(key);
+ Node* hash = TruncateIntPtrToInt32(key);
hash = Word32Xor(hash, seed);
hash = Int32Add(Word32Xor(hash, Int32Constant(0xFFFFFFFF)),
Word32Shl(hash, Int32Constant(15)));
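Only the seed XOR and the first mixing step of ComputeIntegerHash appear in this hunk; for reference, the steps below complete the scalar form of the hash, following the widely used Jenkins-style integer finalizer that V8's scalar helper is based on (the tail is not shown in the hunk above, so treat it as a sketch):

#include <cstdint>
#include <cstdio>

uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;
  hash = ~hash + (hash << 15);   // same as (hash ^ 0xFFFFFFFF) + (hash << 15)
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;
  hash = hash ^ (hash >> 16);
  return hash;
}

int main() {
  std::printf("%u\n", ComputeIntegerHash(42, 0xCAFEBABEu));
  return 0;
}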
@@ -6382,36 +6753,38 @@ Node* CodeStubAssembler::DescriptorArrayNumberOfEntries(Node* descriptors) {
descriptors, IntPtrConstant(DescriptorArray::kDescriptorLengthIndex));
}
-namespace {
-
-Node* DescriptorNumberToIndex(CodeStubAssembler* a, Node* descriptor_number) {
- Node* descriptor_size = a->Int32Constant(DescriptorArray::kEntrySize);
- Node* index = a->Int32Mul(descriptor_number, descriptor_size);
- return a->ChangeInt32ToIntPtr(index);
+Node* CodeStubAssembler::DescriptorNumberToIndex(
+ SloppyTNode<Uint32T> descriptor_number) {
+ Node* descriptor_size = Int32Constant(DescriptorArray::kEntrySize);
+ Node* index = Int32Mul(descriptor_number, descriptor_size);
+ return ChangeInt32ToIntPtr(index);
}
-} // namespace
-
Node* CodeStubAssembler::DescriptorArrayToKeyIndex(Node* descriptor_number) {
return IntPtrAdd(IntPtrConstant(DescriptorArray::ToKeyIndex(0)),
- DescriptorNumberToIndex(this, descriptor_number));
+ DescriptorNumberToIndex(descriptor_number));
}
Node* CodeStubAssembler::DescriptorArrayGetSortedKeyIndex(
Node* descriptors, Node* descriptor_number) {
- const int details_offset = DescriptorArray::ToDetailsIndex(0) * kPointerSize;
- Node* details = LoadAndUntagToWord32FixedArrayElement(
- descriptors, DescriptorNumberToIndex(this, descriptor_number),
- details_offset);
+ Node* details = DescriptorArrayGetDetails(
+ TNode<DescriptorArray>::UncheckedCast(descriptors),
+ TNode<Uint32T>::UncheckedCast(descriptor_number));
return DecodeWord32<PropertyDetails::DescriptorPointer>(details);
}
Node* CodeStubAssembler::DescriptorArrayGetKey(Node* descriptors,
Node* descriptor_number) {
const int key_offset = DescriptorArray::ToKeyIndex(0) * kPointerSize;
- return LoadFixedArrayElement(descriptors,
- DescriptorNumberToIndex(this, descriptor_number),
- key_offset);
+ return LoadFixedArrayElement(
+ descriptors, DescriptorNumberToIndex(descriptor_number), key_offset);
+}
+
+TNode<Uint32T> CodeStubAssembler::DescriptorArrayGetDetails(
+ TNode<DescriptorArray> descriptors, TNode<Uint32T> descriptor_number) {
+ const int details_offset = DescriptorArray::ToDetailsIndex(0) * kPointerSize;
+ return TNode<Uint32T>::UncheckedCast(LoadAndUntagToWord32FixedArrayElement(
+ descriptors, DescriptorNumberToIndex(descriptor_number), details_offset));
}
void CodeStubAssembler::DescriptorLookupBinary(Node* unique_name,
@@ -6610,12 +6983,22 @@ void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
Variable* var_value) {
DCHECK_EQ(MachineRepresentation::kWord32, var_details->rep());
DCHECK_EQ(MachineRepresentation::kTagged, var_value->rep());
- Comment("[ LoadPropertyFromFastObject");
Node* details =
LoadDetailsByKeyIndex<DescriptorArray>(descriptors, name_index);
var_details->Bind(details);
+ LoadPropertyFromFastObject(object, map, descriptors, name_index, details,
+ var_value);
+}
+
+void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
+ Node* descriptors,
+ Node* name_index,
+ Node* details,
+ Variable* var_value) {
+ Comment("[ LoadPropertyFromFastObject");
+
Node* location = DecodeWord32<PropertyDetails::LocationField>(details);
Label if_in_field(this), if_in_descriptor(this), done(this);
@@ -6960,6 +7343,8 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
FLOAT32_ELEMENTS,
FLOAT64_ELEMENTS,
UINT8_CLAMPED_ELEMENTS,
+ BIGUINT64_ELEMENTS,
+ BIGINT64_ELEMENTS,
};
Label* labels[] = {
&if_isobjectorsmi, &if_isobjectorsmi, &if_isobjectorsmi,
@@ -6978,6 +7363,8 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
&if_typedarray,
&if_typedarray,
&if_typedarray,
+ &if_typedarray,
+ &if_typedarray,
};
// clang-format on
STATIC_ASSERT(arraysize(values) == arraysize(labels));
@@ -7358,8 +7745,10 @@ Node* CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
}
Node* CodeStubAssembler::LoadFeedbackVector(Node* closure) {
- Node* cell = LoadObjectField(closure, JSFunction::kFeedbackVectorOffset);
- return LoadObjectField(cell, Cell::kValueOffset);
+ Node* feedback_cell =
+ LoadObjectField(closure, JSFunction::kFeedbackCellOffset);
+ CSA_ASSERT(this, IsFeedbackCell(feedback_cell));
+ return LoadObjectField(feedback_cell, FeedbackCell::kValueOffset);
}
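LoadFeedbackVector now goes through a FeedbackCell rather than a plain Cell: the closure's kFeedbackCellOffset field points at a cell whose value slot holds the feedback vector (the kManyClosuresCellRootIndex store further down in this diff suggests one cell can be shared by several closures). A toy sketch of the indirection, with assumed struct shapes standing in for the real object layouts:

```cpp
// Toy model, not V8 code: the field names mirror the offsets used above.
#include <cstdio>

struct FeedbackVector { int slot_count; };
struct FeedbackCell { FeedbackVector* value; };      // FeedbackCell::kValueOffset
struct JSFunction { FeedbackCell* feedback_cell; };  // JSFunction::kFeedbackCellOffset

FeedbackVector* LoadFeedbackVector(const JSFunction& closure) {
  return closure.feedback_cell->value;  // load the cell, then its value slot
}

int main() {
  FeedbackVector vector{8};
  FeedbackCell cell{&vector};
  JSFunction fn{&cell};
  std::printf("%d\n", LoadFeedbackVector(fn)->slot_count);  // prints 8
  return 0;
}
```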
Node* CodeStubAssembler::LoadFeedbackVectorForStub() {
@@ -7435,6 +7824,7 @@ void CodeStubAssembler::CheckForAssociatedProtector(Node* name,
if_protector);
GotoIf(WordEqual(name, LoadRoot(Heap::kis_concat_spreadable_symbolRootIndex)),
if_protector);
+ GotoIf(WordEqual(name, LoadRoot(Heap::kthen_stringRootIndex)), if_protector);
// Fall through if no case matched.
}
@@ -7445,23 +7835,23 @@ Node* CodeStubAssembler::LoadReceiverMap(Node* receiver) {
MachineRepresentation::kTagged);
}
-Node* CodeStubAssembler::TryToIntptr(Node* key, Label* miss) {
- VARIABLE(var_intptr_key, MachineType::PointerRepresentation());
+TNode<IntPtrT> CodeStubAssembler::TryToIntptr(Node* key, Label* miss) {
+ TVARIABLE(IntPtrT, var_intptr_key);
Label done(this, &var_intptr_key), key_is_smi(this);
GotoIf(TaggedIsSmi(key), &key_is_smi);
// Try to convert a heap number to a Smi.
GotoIfNot(IsHeapNumber(key), miss);
{
- Node* value = LoadHeapNumberValue(key);
- Node* int_value = RoundFloat64ToInt32(value);
+ TNode<Float64T> value = LoadHeapNumberValue(key);
+ TNode<Int32T> int_value = RoundFloat64ToInt32(value);
GotoIfNot(Float64Equal(value, ChangeInt32ToFloat64(int_value)), miss);
- var_intptr_key.Bind(ChangeInt32ToIntPtr(int_value));
+ var_intptr_key = ChangeInt32ToIntPtr(int_value);
Goto(&done);
}
BIND(&key_is_smi);
{
- var_intptr_key.Bind(SmiUntag(key));
+ var_intptr_key = SmiUntag(key);
Goto(&done);
}
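TryToIntptr accepts only two shapes of key: a Smi, or a HeapNumber whose double value round-trips through int32 unchanged; anything else jumps to the miss label. A standalone sketch of that round-trip test, with std::optional standing in for the miss label:

```cpp
// Sketch only: mirrors the RoundFloat64ToInt32 / Float64Equal check above.
#include <cstdint>
#include <cstdio>
#include <optional>

std::optional<intptr_t> TryToIntptr(double value) {
  int32_t int_value = static_cast<int32_t>(value);  // RoundFloat64ToInt32
  if (static_cast<double>(int_value) != value) {
    return std::nullopt;                            // miss: not an exact int32
  }
  return static_cast<intptr_t>(int_value);
}

int main() {
  for (double d : {3.0, 3.5, -0.0, 1e9}) {
    auto key = TryToIntptr(d);
    std::printf("%g -> %s\n", d, key ? "key" : "miss");
  }
  return 0;
}
```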
@@ -7658,7 +8048,7 @@ Node* CodeStubAssembler::Float64ToUint8Clamped(Node* float64_value) {
}
Node* CodeStubAssembler::PrepareValueForWriteToTypedArray(
- Node* input, ElementsKind elements_kind, Label* bailout) {
+ TNode<Object> input, ElementsKind elements_kind, TNode<Context> context) {
DCHECK(IsFixedTypedArrayElementsKind(elements_kind));
MachineRepresentation rep;
@@ -7683,17 +8073,24 @@ Node* CodeStubAssembler::PrepareValueForWriteToTypedArray(
}
VARIABLE(var_result, rep);
- Label done(this, &var_result), if_smi(this), if_heapnumber(this);
- GotoIf(TaggedIsSmi(input), &if_smi);
+ VARIABLE(var_input, MachineRepresentation::kTagged, input);
+ Label done(this, &var_result), if_smi(this), if_heapnumber_or_oddball(this),
+ convert(this), loop(this, &var_input);
+ Goto(&loop);
+ BIND(&loop);
+ GotoIf(TaggedIsSmi(var_input.value()), &if_smi);
// We can handle both HeapNumber and Oddball here, since Oddball has the
// same layout as the HeapNumber for the HeapNumber::value field. This
// way we can also properly optimize stores of oddballs to typed arrays.
- GotoIf(IsHeapNumber(input), &if_heapnumber);
- Branch(HasInstanceType(input, ODDBALL_TYPE), &if_heapnumber, bailout);
+ GotoIf(IsHeapNumber(var_input.value()), &if_heapnumber_or_oddball);
+ STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+ Branch(HasInstanceType(var_input.value(), ODDBALL_TYPE),
+ &if_heapnumber_or_oddball, &convert);
- BIND(&if_heapnumber);
+ BIND(&if_heapnumber_or_oddball);
{
- Node* value = LoadHeapNumberValue(input);
+ Node* value = UncheckedCast<Float64T>(LoadObjectField(
+ var_input.value(), HeapNumber::kValueOffset, MachineType::Float64()));
if (rep == MachineRepresentation::kWord32) {
if (elements_kind == UINT8_CLAMPED_ELEMENTS) {
value = Float64ToUint8Clamped(value);
@@ -7711,7 +8108,7 @@ Node* CodeStubAssembler::PrepareValueForWriteToTypedArray(
BIND(&if_smi);
{
- Node* value = SmiToWord32(input);
+ Node* value = SmiToInt32(var_input.value());
if (rep == MachineRepresentation::kFloat32) {
value = RoundInt32ToFloat32(value);
} else if (rep == MachineRepresentation::kFloat64) {
@@ -7726,67 +8123,125 @@ Node* CodeStubAssembler::PrepareValueForWriteToTypedArray(
Goto(&done);
}
+ BIND(&convert);
+ {
+ var_input.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, input));
+ Goto(&loop);
+ }
+
BIND(&done);
return var_result.value();
}
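The rewritten PrepareValueForWriteToTypedArray no longer bails out for non-numeric inputs: it loops, calling the NonNumberToNumber builtin until the value is a Smi, HeapNumber or Oddball, and only then lowers it to the element representation. A rough standalone sketch of that loop; the stand-in ToNumber step and the int32 truncation are simplified assumptions, not the real conversion:

```cpp
// Toy model of the convert-and-retry loop above; not V8 code.
#include <cstdint>
#include <cstdio>

struct Value {
  enum Kind { kSmi, kHeapNumber, kOther } kind;
  double payload;  // meaningful for kSmi / kHeapNumber
};

// Stand-in for Builtins::kNonNumberToNumber: in this sketch one conversion
// always yields a HeapNumber.
Value NonNumberToNumber(Value) { return {Value::kHeapNumber, 1.0}; }

int32_t PrepareForInt32Store(Value input) {
  for (;;) {  // the loop introduced in the diff
    if (input.kind == Value::kSmi || input.kind == Value::kHeapNumber) {
      return static_cast<int32_t>(input.payload);  // simplified truncation
    }
    input = NonNumberToNumber(input);  // convert, then try again
  }
}

int main() {
  std::printf("%d %d\n", PrepareForInt32Store({Value::kSmi, 7.0}),
              PrepareForInt32Store({Value::kOther, 0.0}));  // prints 7 1
  return 0;
}
```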
+void CodeStubAssembler::EmitBigTypedArrayElementStore(
+ TNode<JSTypedArray> object, TNode<FixedTypedArrayBase> elements,
+ TNode<IntPtrT> intptr_key, TNode<Object> value, TNode<Context> context,
+ Label* opt_if_neutered) {
+ TNode<BigInt> bigint_value = ToBigInt(context, value);
+ TNode<WordT> bitfield = LoadBigIntBitfield(bigint_value);
+ TNode<UintPtrT> length = DecodeWord<BigIntBase::LengthBits>(bitfield);
+ TNode<UintPtrT> sign = DecodeWord<BigIntBase::SignBits>(bitfield);
+ TVARIABLE(UintPtrT, var_low, Unsigned(IntPtrConstant(0)));
+ // Only used on 32-bit platforms.
+ TVARIABLE(UintPtrT, var_high, Unsigned(IntPtrConstant(0)));
+ Label do_store(this);
+ GotoIf(WordEqual(length, IntPtrConstant(0)), &do_store);
+ var_low = LoadBigIntDigit(bigint_value, 0);
+ if (!Is64()) {
+ Label load_done(this);
+ GotoIf(WordEqual(length, IntPtrConstant(1)), &load_done);
+ var_high = LoadBigIntDigit(bigint_value, 1);
+ Goto(&load_done);
+ BIND(&load_done);
+ }
+ GotoIf(WordEqual(sign, IntPtrConstant(0)), &do_store);
+ // Negative value. Simulate two's complement.
+ if (!Is64()) {
+ var_high = Unsigned(IntPtrSub(IntPtrConstant(0), var_high.value()));
+ Label no_carry(this);
+ GotoIf(WordEqual(var_low.value(), IntPtrConstant(0)), &no_carry);
+ var_high = Unsigned(IntPtrSub(var_high.value(), IntPtrConstant(1)));
+ Goto(&no_carry);
+ BIND(&no_carry);
+ }
+ var_low = Unsigned(IntPtrSub(IntPtrConstant(0), var_low.value()));
+ Goto(&do_store);
+
+ BIND(&do_store);
+ if (opt_if_neutered != nullptr) {
+ // Check if buffer has been neutered.
+ Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
+ GotoIf(IsDetachedBuffer(buffer), opt_if_neutered);
+ }
+
+ Node* backing_store = LoadFixedTypedArrayBackingStore(elements);
+ Node* offset = ElementOffsetFromIndex(intptr_key, BIGINT64_ELEMENTS,
+ INTPTR_PARAMETERS, 0);
+ MachineRepresentation rep = WordT::kMachineRepresentation;
+ StoreNoWriteBarrier(rep, backing_store, offset, var_low.value());
+ if (!Is64()) {
+ StoreNoWriteBarrier(rep, backing_store,
+ IntPtrAdd(offset, IntPtrConstant(kPointerSize)),
+ var_high.value());
+ }
+}
+
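For negative BigInts, EmitBigTypedArrayElementStore stores the two's complement of the magnitude; on 32-bit targets the value is split into a low and a high digit, so the borrow has to be propagated by hand. A standalone sketch of that negation:

```cpp
// Sketch only: mirrors the negate-low / borrow-from-high sequence above.
#include <cstdint>
#include <cstdio>

void NegateSplit64(uint32_t low, uint32_t high, uint32_t out[2]) {
  uint32_t neg_high = 0u - high;
  if (low != 0) neg_high -= 1;  // nonzero low word borrows one from the high word
  uint32_t neg_low = 0u - low;
  out[0] = neg_low;
  out[1] = neg_high;
}

int main() {
  uint32_t out[2];
  NegateSplit64(5, 0, out);  // BigInt magnitude 5 with the sign bit set -> -5
  int64_t combined =
      static_cast<int64_t>((static_cast<uint64_t>(out[1]) << 32) | out[0]);
  std::printf("%lld\n", static_cast<long long>(combined));  // prints -5
  return 0;
}
```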
void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
bool is_jsarray,
ElementsKind elements_kind,
KeyedAccessStoreMode store_mode,
- Label* bailout) {
+ Label* bailout, Node* context) {
CSA_ASSERT(this, Word32BinaryNot(IsJSProxy(object)));
+
Node* elements = LoadElements(object);
- if (IsSmiOrObjectElementsKind(elements_kind) &&
- store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
- // Bailout in case of COW elements.
- GotoIf(WordNotEqual(LoadMap(elements),
- LoadRoot(Heap::kFixedArrayMapRootIndex)),
- bailout);
+ if (!IsSmiOrObjectElementsKind(elements_kind)) {
+ CSA_ASSERT(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements))));
+ } else if (!IsCOWHandlingStoreMode(store_mode)) {
+ GotoIf(IsFixedCOWArrayMap(LoadMap(elements)), bailout);
}
+
// TODO(ishell): introduce TryToIntPtrOrSmi() and use OptimalParameterMode().
ParameterMode parameter_mode = INTPTR_PARAMETERS;
- key = TryToIntptr(key, bailout);
+ TNode<IntPtrT> intptr_key = TryToIntptr(key, bailout);
if (IsFixedTypedArrayElementsKind(elements_kind)) {
Label done(this);
- // TODO(ishell): call ToNumber() on value and don't bailout but be careful
- // to call it only once if we decide to bailout because of bounds checks.
-
- value = PrepareValueForWriteToTypedArray(value, elements_kind, bailout);
-
- // There must be no allocations between the buffer load and
- // and the actual store to backing store, because GC may decide that
- // the buffer is not alive or move the elements.
- // TODO(ishell): introduce DisallowHeapAllocationCode scope here.
-
- // Check if buffer has been neutered.
- Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
- GotoIf(IsDetachedBuffer(buffer), bailout);
-
// Bounds check.
Node* length = TaggedToParameter(
CAST(LoadObjectField(object, JSTypedArray::kLengthOffset)),
parameter_mode);
if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
- // Skip the store if we write beyond the length.
- GotoIfNot(IntPtrLessThan(key, length), &done);
- // ... but bailout if the key is negative.
+ // Skip the store if we write beyond the length or
+ // to a property with a negative integer index.
+ GotoIfNot(UintPtrLessThan(intptr_key, length), &done);
} else {
DCHECK_EQ(STANDARD_STORE, store_mode);
+ GotoIfNot(UintPtrLessThan(intptr_key, length), bailout);
}
- GotoIfNot(UintPtrLessThan(key, length), bailout);
- // Backing store = external_pointer + base_pointer.
- Node* external_pointer =
- LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset,
- MachineType::Pointer());
- Node* base_pointer =
- LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset);
- Node* backing_store =
- IntPtrAdd(external_pointer, BitcastTaggedToWord(base_pointer));
- StoreElement(backing_store, elements_kind, key, value, parameter_mode);
+ TNode<Object> value_obj = UncheckedCast<Object>(value);
+ if (elements_kind == BIGINT64_ELEMENTS ||
+ elements_kind == BIGUINT64_ELEMENTS) {
+ EmitBigTypedArrayElementStore(CAST(object), CAST(elements), intptr_key,
+ value_obj, CAST(context), bailout);
+ } else {
+ value = PrepareValueForWriteToTypedArray(value_obj, elements_kind,
+ CAST(context));
+
+ // There must be no allocations between the buffer load and the actual
+ // store to the backing store, because GC may decide that the buffer is
+ // not alive or may move the elements.
+ // TODO(ishell): introduce DisallowHeapAllocationCode scope here.
+
+ // Check if buffer has been neutered.
+ Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
+ GotoIf(IsDetachedBuffer(buffer), bailout);
+
+ Node* backing_store = LoadFixedTypedArrayBackingStore(CAST(elements));
+ StoreElement(backing_store, elements_kind, intptr_key, value,
+ parameter_mode);
+ }
Goto(&done);
BIND(&done);
@@ -7809,32 +8264,39 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
}
if (IsGrowStoreMode(store_mode)) {
- elements = CheckForCapacityGrow(object, elements, elements_kind, length,
- key, parameter_mode, is_jsarray, bailout);
+ elements = CheckForCapacityGrow(object, elements, elements_kind, store_mode,
+ length, intptr_key, parameter_mode,
+ is_jsarray, bailout);
} else {
- GotoIfNot(UintPtrLessThan(key, length), bailout);
+ GotoIfNot(UintPtrLessThan(intptr_key, length), bailout);
+ }
- if ((store_mode == STORE_NO_TRANSITION_HANDLE_COW) &&
- IsSmiOrObjectElementsKind(elements_kind)) {
- elements = CopyElementsOnWrite(object, elements, elements_kind, length,
- parameter_mode, bailout);
- }
+ // If we didn't grow {elements}, it might still be COW, in which case we
+ // copy it now.
+ if (!IsSmiOrObjectElementsKind(elements_kind)) {
+ CSA_ASSERT(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements))));
+ } else if (IsCOWHandlingStoreMode(store_mode)) {
+ elements = CopyElementsOnWrite(object, elements, elements_kind, length,
+ parameter_mode, bailout);
}
- StoreElement(elements, elements_kind, key, value, parameter_mode);
+
+ CSA_ASSERT(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements))));
+ StoreElement(elements, elements_kind, intptr_key, value, parameter_mode);
}
-Node* CodeStubAssembler::CheckForCapacityGrow(Node* object, Node* elements,
- ElementsKind kind, Node* length,
- Node* key, ParameterMode mode,
- bool is_js_array,
- Label* bailout) {
+Node* CodeStubAssembler::CheckForCapacityGrow(
+ Node* object, Node* elements, ElementsKind kind,
+ KeyedAccessStoreMode store_mode, Node* length, Node* key,
+ ParameterMode mode, bool is_js_array, Label* bailout) {
VARIABLE(checked_elements, MachineRepresentation::kTagged);
- Label grow_case(this), no_grow_case(this), done(this);
+ Label grow_case(this), no_grow_case(this), done(this),
+ grow_bailout(this, Label::kDeferred);
Node* condition;
if (IsHoleyOrDictionaryElementsKind(kind)) {
condition = UintPtrGreaterThanOrEqual(key, length);
} else {
+ // We don't support growing here unless the value is being appended.
condition = WordEqual(key, length);
}
Branch(condition, &grow_case, &no_grow_case);
@@ -7843,20 +8305,32 @@ Node* CodeStubAssembler::CheckForCapacityGrow(Node* object, Node* elements,
{
Node* current_capacity =
TaggedToParameter(LoadFixedArrayBaseLength(elements), mode);
-
checked_elements.Bind(elements);
-
Label fits_capacity(this);
+ // If key is negative, we will notice in Runtime::kGrowArrayElements.
GotoIf(UintPtrLessThan(key, current_capacity), &fits_capacity);
+
{
Node* new_elements = TryGrowElementsCapacity(
- object, elements, kind, key, current_capacity, mode, bailout);
-
+ object, elements, kind, key, current_capacity, mode, &grow_bailout);
checked_elements.Bind(new_elements);
Goto(&fits_capacity);
}
- BIND(&fits_capacity);
+ BIND(&grow_bailout);
+ {
+ Node* tagged_key = mode == SMI_PARAMETERS
+ ? key
+ : ChangeInt32ToTagged(TruncateIntPtrToInt32(key));
+ Node* maybe_elements = CallRuntime(
+ Runtime::kGrowArrayElements, NoContextConstant(), object, tagged_key);
+ GotoIf(TaggedIsSmi(maybe_elements), bailout);
+ CSA_ASSERT(this, IsFixedArrayWithKind(maybe_elements, kind));
+ checked_elements.Bind(maybe_elements);
+ Goto(&fits_capacity);
+ }
+
+ BIND(&fits_capacity);
if (is_js_array) {
Node* new_length = IntPtrAdd(key, IntPtrOrSmiConstant(1, mode));
StoreObjectFieldNoWriteBarrier(object, JSArray::kLengthOffset,
@@ -7883,15 +8357,12 @@ Node* CodeStubAssembler::CopyElementsOnWrite(Node* object, Node* elements,
VARIABLE(new_elements_var, MachineRepresentation::kTagged, elements);
Label done(this);
- GotoIfNot(
- WordEqual(LoadMap(elements), LoadRoot(Heap::kFixedCOWArrayMapRootIndex)),
- &done);
+ GotoIfNot(IsFixedCOWArrayMap(LoadMap(elements)), &done);
{
Node* capacity =
TaggedToParameter(LoadFixedArrayBaseLength(elements), mode);
Node* new_elements = GrowElementsCapacity(object, elements, kind, kind,
length, capacity, mode, bailout);
-
new_elements_var.Bind(new_elements);
Goto(&done);
}
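EmitElementStore and CopyElementsOnWrite above treat copy-on-write backing stores uniformly: a store into COW elements either bails out (when the store mode cannot handle COW) or copies the FixedArray first and writes into the copy. A small sketch of that decision, with a plain flag standing in for the FixedCOWArrayMap check:

```cpp
// Sketch only: a COW flag on the backing store replaces the map check above.
#include <cstdio>
#include <memory>
#include <vector>

struct Elements {
  bool copy_on_write;
  std::vector<int> data;
};

// Returns false where the real code would jump to the bailout label.
bool EmitElementStore(std::shared_ptr<Elements>& elements, size_t key, int value,
                      bool store_mode_handles_cow) {
  if (elements->copy_on_write) {
    if (!store_mode_handles_cow) return false;  // bailout
    // CopyElementsOnWrite: replace the COW store with a private copy.
    elements = std::make_shared<Elements>(Elements{false, elements->data});
  }
  if (key >= elements->data.size()) return false;  // bounds bailout
  elements->data[key] = value;
  return true;
}

int main() {
  auto cow = std::make_shared<Elements>(Elements{true, {1, 2, 3}});
  bool stored = EmitElementStore(cow, 1, 42, true);
  std::printf("%d %d\n", stored, cow->data[1]);  // prints: 1 42
  return 0;
}
```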
@@ -8243,19 +8714,22 @@ void CodeStubAssembler::BranchIfNumberRelationalComparison(
{
switch (op) {
case Operation::kLessThan:
- Branch(Float64LessThan(var_left_float, var_right_float), if_true,
- if_false);
+ Branch(Float64LessThan(var_left_float.value(), var_right_float.value()),
+ if_true, if_false);
break;
case Operation::kLessThanOrEqual:
- Branch(Float64LessThanOrEqual(var_left_float, var_right_float), if_true,
- if_false);
+ Branch(Float64LessThanOrEqual(var_left_float.value(),
+ var_right_float.value()),
+ if_true, if_false);
break;
case Operation::kGreaterThan:
- Branch(Float64GreaterThan(var_left_float, var_right_float), if_true,
- if_false);
+ Branch(
+ Float64GreaterThan(var_left_float.value(), var_right_float.value()),
+ if_true, if_false);
break;
case Operation::kGreaterThanOrEqual:
- Branch(Float64GreaterThanOrEqual(var_left_float, var_right_float),
+ Branch(Float64GreaterThanOrEqual(var_left_float.value(),
+ var_right_float.value()),
if_true, if_false);
break;
default:
@@ -8649,19 +9123,22 @@ Node* CodeStubAssembler::RelationalComparison(Operation op, Node* left,
{
switch (op) {
case Operation::kLessThan:
- Branch(Float64LessThan(var_left_float, var_right_float), &return_true,
- &return_false);
+ Branch(Float64LessThan(var_left_float.value(), var_right_float.value()),
+ &return_true, &return_false);
break;
case Operation::kLessThanOrEqual:
- Branch(Float64LessThanOrEqual(var_left_float, var_right_float),
+ Branch(Float64LessThanOrEqual(var_left_float.value(),
+ var_right_float.value()),
&return_true, &return_false);
break;
case Operation::kGreaterThan:
- Branch(Float64GreaterThan(var_left_float, var_right_float),
- &return_true, &return_false);
+ Branch(
+ Float64GreaterThan(var_left_float.value(), var_right_float.value()),
+ &return_true, &return_false);
break;
case Operation::kGreaterThanOrEqual:
- Branch(Float64GreaterThanOrEqual(var_left_float, var_right_float),
+ Branch(Float64GreaterThanOrEqual(var_left_float.value(),
+ var_right_float.value()),
&return_true, &return_false);
break;
default:
@@ -8682,7 +9159,7 @@ Node* CodeStubAssembler::RelationalComparison(Operation op, Node* left,
}
BIND(&end);
- return var_result;
+ return var_result.value();
}
Node* CodeStubAssembler::CollectFeedbackForString(Node* instance_type) {
@@ -9090,8 +9567,8 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
BIND(&do_float_comparison);
{
- Branch(Float64Equal(var_left_float, var_right_float), &if_equal,
- &if_notequal);
+ Branch(Float64Equal(var_left_float.value(), var_right_float.value()),
+ &if_equal, &if_notequal);
}
BIND(&if_equal);
@@ -9621,73 +10098,8 @@ TNode<Oddball> CodeStubAssembler::HasProperty(SloppyTNode<HeapObject> object,
}
BIND(&end);
- CSA_ASSERT(this, IsBoolean(result));
- return result;
-}
-
-Node* CodeStubAssembler::ClassOf(Node* value) {
- VARIABLE(var_result, MachineRepresentation::kTaggedPointer);
- Label if_function_template_info(this, Label::kDeferred),
- if_no_class_name(this, Label::kDeferred),
- if_function(this, Label::kDeferred), if_object(this, Label::kDeferred),
- if_primitive(this, Label::kDeferred), return_result(this);
-
- // Check if {value} is a Smi.
- GotoIf(TaggedIsSmi(value), &if_primitive);
-
- Node* value_map = LoadMap(value);
- Node* value_instance_type = LoadMapInstanceType(value_map);
-
- // Check if {value} is a JSFunction or JSBoundFunction.
- STATIC_ASSERT(LAST_TYPE == LAST_FUNCTION_TYPE);
- GotoIf(Uint32LessThanOrEqual(Int32Constant(FIRST_FUNCTION_TYPE),
- value_instance_type),
- &if_function);
-
- // Check if {value} is a primitive HeapObject.
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- GotoIfNot(IsJSReceiverInstanceType(value_instance_type), &if_primitive);
-
- // Load the {value}s constructor, and check that it's a JSFunction.
- Node* constructor = LoadMapConstructor(value_map);
- GotoIf(HasInstanceType(constructor, FUNCTION_TEMPLATE_INFO_TYPE),
- &if_function_template_info);
- GotoIfNot(IsJSFunction(constructor), &if_object);
-
- // Return the instance class name for the {constructor}.
- Node* shared_info =
- LoadObjectField(constructor, JSFunction::kSharedFunctionInfoOffset);
- Node* instance_class_name = LoadObjectField(
- shared_info, SharedFunctionInfo::kInstanceClassNameOffset);
- var_result.Bind(instance_class_name);
- Goto(&return_result);
-
- // For remote objects the constructor might be given as FTI.
- BIND(&if_function_template_info);
- Node* class_name =
- LoadObjectField(constructor, FunctionTemplateInfo::kClassNameOffset);
- GotoIf(IsUndefined(class_name), &if_no_class_name);
- var_result.Bind(class_name);
- Goto(&return_result);
-
- BIND(&if_no_class_name);
- var_result.Bind(LoadRoot(Heap::kempty_stringRootIndex));
- Goto(&return_result);
-
- BIND(&if_function);
- var_result.Bind(LoadRoot(Heap::kFunction_stringRootIndex));
- Goto(&return_result);
-
- BIND(&if_object);
- var_result.Bind(LoadRoot(Heap::kObject_stringRootIndex));
- Goto(&return_result);
-
- BIND(&if_primitive);
- var_result.Bind(NullConstant());
- Goto(&return_result);
-
- BIND(&return_result);
- return var_result.value();
+ CSA_ASSERT(this, IsBoolean(result.value()));
+ return result.value();
}
Node* CodeStubAssembler::Typeof(Node* value) {
@@ -9801,6 +10213,46 @@ Node* CodeStubAssembler::GetSuperConstructor(Node* active_function,
return result.value();
}
+Node* CodeStubAssembler::SpeciesConstructor(Node* context, Node* object,
+ Node* default_constructor) {
+ Isolate* isolate = this->isolate();
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+ var_result.Bind(default_constructor);
+
+ // 2. Let C be ? Get(O, "constructor").
+ Node* const constructor =
+ GetProperty(context, object, isolate->factory()->constructor_string());
+
+ // 3. If C is undefined, return defaultConstructor.
+ Label out(this);
+ GotoIf(IsUndefined(constructor), &out);
+
+ // 4. If Type(C) is not Object, throw a TypeError exception.
+ ThrowIfNotJSReceiver(context, constructor,
+ MessageTemplate::kConstructorNotReceiver);
+
+ // 5. Let S be ? Get(C, @@species).
+ Node* const species =
+ GetProperty(context, constructor, isolate->factory()->species_symbol());
+
+ // 6. If S is either undefined or null, return defaultConstructor.
+ GotoIf(IsNullOrUndefined(species), &out);
+
+ // 7. If IsConstructor(S) is true, return S.
+ Label throw_error(this);
+ GotoIf(TaggedIsSmi(species), &throw_error);
+ GotoIfNot(IsConstructorMap(LoadMap(species)), &throw_error);
+ var_result.Bind(species);
+ Goto(&out);
+
+ // 8. Throw a TypeError exception.
+ BIND(&throw_error);
+ ThrowTypeError(context, MessageTemplate::kSpeciesNotConstructor);
+
+ BIND(&out);
+ return var_result.value();
+}
+
Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable,
Node* context) {
VARIABLE(var_result, MachineRepresentation::kTagged);
@@ -9871,16 +10323,10 @@ Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable,
}
BIND(&if_notcallable);
- {
- CallRuntime(Runtime::kThrowNonCallableInInstanceOfCheck, context);
- Unreachable();
- }
+ { ThrowTypeError(context, MessageTemplate::kNonCallableInInstanceOfCheck); }
BIND(&if_notreceiver);
- {
- CallRuntime(Runtime::kThrowNonObjectInInstanceOfCheck, context);
- Unreachable();
- }
+ { ThrowTypeError(context, MessageTemplate::kNonObjectInInstanceOfCheck); }
BIND(&return_true);
var_result.Bind(TrueConstant());
@@ -9894,50 +10340,51 @@ Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable,
return var_result.value();
}
-Node* CodeStubAssembler::NumberInc(Node* value) {
- VARIABLE(var_result, MachineRepresentation::kTagged);
- VARIABLE(var_finc_value, MachineRepresentation::kFloat64);
+TNode<Number> CodeStubAssembler::NumberInc(SloppyTNode<Number> value) {
+ TVARIABLE(Number, var_result);
+ TVARIABLE(Float64T, var_finc_value);
Label if_issmi(this), if_isnotsmi(this), do_finc(this), end(this);
Branch(TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
BIND(&if_issmi);
{
// Try fast Smi addition first.
- Node* one = SmiConstant(1);
- Node* pair = IntPtrAddWithOverflow(BitcastTaggedToWord(value),
- BitcastTaggedToWord(one));
- Node* overflow = Projection(1, pair);
+ TNode<Smi> one = SmiConstant(1);
+ TNode<PairT<IntPtrT, BoolT>> pair = IntPtrAddWithOverflow(
+ BitcastTaggedToWord(value), BitcastTaggedToWord(one));
+ TNode<BoolT> overflow = Projection<1>(pair);
// Check if the Smi addition overflowed.
Label if_overflow(this), if_notoverflow(this);
Branch(overflow, &if_overflow, &if_notoverflow);
BIND(&if_notoverflow);
- var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
+ var_result = BitcastWordToTaggedSigned(Projection<0>(pair));
Goto(&end);
BIND(&if_overflow);
{
- var_finc_value.Bind(SmiToFloat64(value));
+ TNode<Smi> smi_value = CAST(value);
+ var_finc_value = SmiToFloat64(smi_value);
Goto(&do_finc);
}
}
BIND(&if_isnotsmi);
{
- CSA_ASSERT(this, IsHeapNumber(value));
+ TNode<HeapNumber> heap_number_value = CAST(value);
// Load the HeapNumber value.
- var_finc_value.Bind(LoadHeapNumberValue(value));
+ var_finc_value = LoadHeapNumberValue(heap_number_value);
Goto(&do_finc);
}
BIND(&do_finc);
{
- Node* finc_value = var_finc_value.value();
- Node* one = Float64Constant(1.0);
- Node* finc_result = Float64Add(finc_value, one);
- var_result.Bind(AllocateHeapNumberWithValue(finc_result));
+ TNode<Float64T> finc_value = var_finc_value.value();
+ TNode<Float64T> one = Float64Constant(1.0);
+ TNode<Float64T> finc_result = Float64Add(finc_value, one);
+ var_result = AllocateHeapNumberWithValue(finc_result);
Goto(&end);
}
@@ -9945,50 +10392,51 @@ Node* CodeStubAssembler::NumberInc(Node* value) {
return var_result.value();
}
-Node* CodeStubAssembler::NumberDec(Node* value) {
- VARIABLE(var_result, MachineRepresentation::kTagged);
- VARIABLE(var_fdec_value, MachineRepresentation::kFloat64);
+TNode<Number> CodeStubAssembler::NumberDec(SloppyTNode<Number> value) {
+ TVARIABLE(Number, var_result);
+ TVARIABLE(Float64T, var_fdec_value);
Label if_issmi(this), if_isnotsmi(this), do_fdec(this), end(this);
Branch(TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
BIND(&if_issmi);
{
- // Try fast Smi addition first.
- Node* one = SmiConstant(1);
- Node* pair = IntPtrSubWithOverflow(BitcastTaggedToWord(value),
- BitcastTaggedToWord(one));
- Node* overflow = Projection(1, pair);
+ // Try fast Smi subtraction first.
+ TNode<Smi> one = SmiConstant(1);
+ TNode<PairT<IntPtrT, BoolT>> pair = IntPtrSubWithOverflow(
+ BitcastTaggedToWord(value), BitcastTaggedToWord(one));
+ TNode<BoolT> overflow = Projection<1>(pair);
- // Check if the Smi addition overflowed.
+ // Check if the Smi subtraction overflowed.
Label if_overflow(this), if_notoverflow(this);
Branch(overflow, &if_overflow, &if_notoverflow);
BIND(&if_notoverflow);
- var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
+ var_result = BitcastWordToTaggedSigned(Projection<0>(pair));
Goto(&end);
BIND(&if_overflow);
{
- var_fdec_value.Bind(SmiToFloat64(value));
+ TNode<Smi> smi_value = CAST(value);
+ var_fdec_value = SmiToFloat64(smi_value);
Goto(&do_fdec);
}
}
BIND(&if_isnotsmi);
{
- CSA_ASSERT(this, IsHeapNumber(value));
+ TNode<HeapNumber> heap_number_value = CAST(value);
// Load the HeapNumber value.
- var_fdec_value.Bind(LoadHeapNumberValue(value));
+ var_fdec_value = LoadHeapNumberValue(heap_number_value);
Goto(&do_fdec);
}
BIND(&do_fdec);
{
- Node* fdec_value = var_fdec_value.value();
- Node* minus_one = Float64Constant(-1.0);
- Node* fdec_result = Float64Add(fdec_value, minus_one);
- var_result.Bind(AllocateHeapNumberWithValue(fdec_result));
+ TNode<Float64T> fdec_value = var_fdec_value.value();
+ TNode<Float64T> minus_one = Float64Constant(-1.0);
+ TNode<Float64T> fdec_result = Float64Add(fdec_value, minus_one);
+ var_result = AllocateHeapNumberWithValue(fdec_result);
Goto(&end);
}
@@ -9996,29 +10444,29 @@ Node* CodeStubAssembler::NumberDec(Node* value) {
return var_result.value();
}
-Node* CodeStubAssembler::NumberAdd(Node* a, Node* b) {
- VARIABLE(var_result, MachineRepresentation::kTagged);
- VARIABLE(var_fadd_value, MachineRepresentation::kFloat64);
+TNode<Number> CodeStubAssembler::NumberAdd(SloppyTNode<Number> a,
+ SloppyTNode<Number> b) {
+ TVARIABLE(Number, var_result);
Label float_add(this, Label::kDeferred), end(this);
GotoIf(TaggedIsNotSmi(a), &float_add);
GotoIf(TaggedIsNotSmi(b), &float_add);
// Try fast Smi addition first.
- Node* pair =
+ TNode<PairT<IntPtrT, BoolT>> pair =
IntPtrAddWithOverflow(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
- Node* overflow = Projection(1, pair);
+ TNode<BoolT> overflow = Projection<1>(pair);
// Check if the Smi addition overflowed.
Label if_overflow(this), if_notoverflow(this);
GotoIf(overflow, &float_add);
- var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
+ var_result = BitcastWordToTaggedSigned(Projection<0>(pair));
Goto(&end);
BIND(&float_add);
{
- var_result.Bind(ChangeFloat64ToTagged(
- Float64Add(ChangeNumberToFloat64(a), ChangeNumberToFloat64(b))));
+ var_result = ChangeFloat64ToTagged(
+ Float64Add(ChangeNumberToFloat64(a), ChangeNumberToFloat64(b)));
Goto(&end);
}
@@ -10026,29 +10474,29 @@ Node* CodeStubAssembler::NumberAdd(Node* a, Node* b) {
return var_result.value();
}
-Node* CodeStubAssembler::NumberSub(Node* a, Node* b) {
- VARIABLE(var_result, MachineRepresentation::kTagged);
- VARIABLE(var_fsub_value, MachineRepresentation::kFloat64);
+TNode<Number> CodeStubAssembler::NumberSub(SloppyTNode<Number> a,
+ SloppyTNode<Number> b) {
+ TVARIABLE(Number, var_result);
Label float_sub(this, Label::kDeferred), end(this);
GotoIf(TaggedIsNotSmi(a), &float_sub);
GotoIf(TaggedIsNotSmi(b), &float_sub);
// Try fast Smi subtraction first.
- Node* pair =
+ TNode<PairT<IntPtrT, BoolT>> pair =
IntPtrSubWithOverflow(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
- Node* overflow = Projection(1, pair);
+ TNode<BoolT> overflow = Projection<1>(pair);
// Check if the Smi subtraction overflowed.
Label if_overflow(this), if_notoverflow(this);
GotoIf(overflow, &float_sub);
- var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
+ var_result = BitcastWordToTaggedSigned(Projection<0>(pair));
Goto(&end);
BIND(&float_sub);
{
- var_result.Bind(ChangeFloat64ToTagged(
- Float64Sub(ChangeNumberToFloat64(a), ChangeNumberToFloat64(b))));
+ var_result = ChangeFloat64ToTagged(
+ Float64Sub(ChangeNumberToFloat64(a), ChangeNumberToFloat64(b)));
Goto(&end);
}
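NumberInc, NumberDec, NumberAdd and NumberSub above all share one shape: try the tagged small-integer operation with an overflow check, and fall back to double ("heap number") arithmetic only when the Smi path overflows or an operand is already a heap number. A minimal sketch of that pattern, with int32_t used as an assumed stand-in for the Smi range:

```cpp
// Sketch only: a tagged Number is modelled as either a small integer or a double.
#include <cstdint>
#include <cstdio>
#include <limits>

struct Number {
  bool is_smi;
  int32_t smi;   // valid when is_smi
  double heap;   // valid otherwise ("heap number")
};

Number NumberInc(Number value) {
  if (value.is_smi) {
    if (value.smi != std::numeric_limits<int32_t>::max()) {
      return {true, value.smi + 1, 0.0};  // fast Smi path
    }
    return {false, 0, static_cast<double>(value.smi) + 1.0};  // overflow: go float
  }
  return {false, 0, value.heap + 1.0};  // already a heap number
}

int main() {
  Number a = NumberInc({true, 41, 0.0});
  Number b = NumberInc({true, std::numeric_limits<int32_t>::max(), 0.0});
  std::printf("%d %.1f\n", a.smi, b.heap);  // prints: 42 2147483648.0
  return 0;
}
```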
@@ -10348,15 +10796,6 @@ Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context,
return result;
}
-Node* CodeStubAssembler::TypedArraySpeciesCreateByLength(Node* context,
- Node* originalArray,
- Node* len) {
- // TODO(tebbi): Install a fast path as well, which avoids the runtime
- // call.
- return CallRuntime(Runtime::kTypedArraySpeciesCreateByLength, context,
- originalArray, len);
-}
-
Node* CodeStubAssembler::IsDetachedBuffer(Node* buffer) {
CSA_ASSERT(this, HasInstanceType(buffer, JS_ARRAY_BUFFER_TYPE));
@@ -10430,7 +10869,7 @@ TNode<Object> CodeStubArguments::GetOptionalArgumentValue(
assembler_->Goto(&argument_done);
assembler_->BIND(&argument_done);
- return result;
+ return result.value();
}
void CodeStubArguments::ForEach(
@@ -10470,7 +10909,7 @@ void CodeStubArguments::PopAndReturn(Node* value) {
pop_count = argc_;
}
- assembler_->PopAndReturn(assembler_->ParameterToWord(pop_count, argc_mode_),
+ assembler_->PopAndReturn(assembler_->ParameterToIntPtr(pop_count, argc_mode_),
value);
}
@@ -10534,8 +10973,8 @@ Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map,
Heap::kEmptyFixedArrayRootIndex);
StoreObjectFieldRoot(fun, JSObject::kElementsOffset,
Heap::kEmptyFixedArrayRootIndex);
- StoreObjectFieldRoot(fun, JSFunction::kFeedbackVectorOffset,
- Heap::kUndefinedCellRootIndex);
+ StoreObjectFieldRoot(fun, JSFunction::kFeedbackCellOffset,
+ Heap::kManyClosuresCellRootIndex);
StoreObjectFieldNoWriteBarrier(fun, JSFunction::kSharedFunctionInfoOffset,
shared_info);
StoreObjectFieldNoWriteBarrier(fun, JSFunction::kContextOffset, context);
@@ -10543,28 +10982,6 @@ Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map,
return fun;
}
-Node* CodeStubAssembler::AllocatePromiseReactionJobInfo(
- Node* value, Node* tasks, Node* deferred_promise, Node* deferred_on_resolve,
- Node* deferred_on_reject, Node* context) {
- Node* const result = Allocate(PromiseReactionJobInfo::kSize);
- StoreMapNoWriteBarrier(result, Heap::kPromiseReactionJobInfoMapRootIndex);
- StoreObjectFieldNoWriteBarrier(result, PromiseReactionJobInfo::kValueOffset,
- value);
- StoreObjectFieldNoWriteBarrier(result, PromiseReactionJobInfo::kTasksOffset,
- tasks);
- StoreObjectFieldNoWriteBarrier(
- result, PromiseReactionJobInfo::kDeferredPromiseOffset, deferred_promise);
- StoreObjectFieldNoWriteBarrier(
- result, PromiseReactionJobInfo::kDeferredOnResolveOffset,
- deferred_on_resolve);
- StoreObjectFieldNoWriteBarrier(
- result, PromiseReactionJobInfo::kDeferredOnRejectOffset,
- deferred_on_reject);
- StoreObjectFieldNoWriteBarrier(result, PromiseReactionJobInfo::kContextOffset,
- context);
- return result;
-}
-
Node* CodeStubAssembler::MarkerIsFrameType(Node* marker_or_function,
StackFrame::Type frame_type) {
return WordEqual(marker_or_function,
diff --git a/deps/v8/src/code-stub-assembler.h b/deps/v8/src/code-stub-assembler.h
index 0dd7a35c4a..8fca0b667f 100644
--- a/deps/v8/src/code-stub-assembler.h
+++ b/deps/v8/src/code-stub-assembler.h
@@ -64,12 +64,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(Tuple3Map, tuple3_map, Tuple3Map) \
V(UndefinedValue, undefined_value, Undefined) \
V(WeakCellMap, weak_cell_map, WeakCellMap) \
- V(SharedFunctionInfoMap, shared_function_info_map, SharedFunctionInfoMap) \
- V(promise_default_reject_handler_symbol, \
- promise_default_reject_handler_symbol, PromiseDefaultRejectHandlerSymbol) \
- V(promise_default_resolve_handler_symbol, \
- promise_default_resolve_handler_symbol, \
- PromiseDefaultResolveHandlerSymbol)
+ V(SharedFunctionInfoMap, shared_function_info_map, SharedFunctionInfoMap)
// Returned from IteratorBuiltinsAssembler::GetIterator(). Struct is declared
// here to simplify use in other generated builtins.
@@ -131,18 +126,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
return ParameterRepresentation(OptimalParameterMode());
}
- Node* ParameterToWord(Node* value, ParameterMode mode) {
+ Node* ParameterToIntPtr(Node* value, ParameterMode mode) {
if (mode == SMI_PARAMETERS) value = SmiUntag(value);
return value;
}
- Node* WordToParameter(SloppyTNode<IntPtrT> value, ParameterMode mode) {
+ Node* IntPtrToParameter(SloppyTNode<IntPtrT> value, ParameterMode mode) {
if (mode == SMI_PARAMETERS) return SmiTag(value);
return value;
}
- Node* Word32ToParameter(SloppyTNode<Int32T> value, ParameterMode mode) {
- return WordToParameter(ChangeInt32ToIntPtr(value), mode);
+ Node* Int32ToParameter(SloppyTNode<Int32T> value, ParameterMode mode) {
+ return IntPtrToParameter(ChangeInt32ToIntPtr(value), mode);
}
TNode<Smi> ParameterToTagged(Node* value, ParameterMode mode) {
@@ -223,17 +218,23 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Select the minimum of the two provided Number values.
TNode<Object> NumberMin(SloppyTNode<Object> left, SloppyTNode<Object> right);
- // Tag a Word as a Smi value.
+ // After converting an index to an integer, calculate a relative index: if
+ // index < 0, max(length + index, 0); else min(index, length)
+ TNode<IntPtrT> ConvertToRelativeIndex(TNode<Context> context,
+ TNode<Object> index,
+ TNode<IntPtrT> length);
+
+ // Tag an IntPtr as a Smi value.
TNode<Smi> SmiTag(SloppyTNode<IntPtrT> value);
- // Untag a Smi value as a Word.
+ // Untag a Smi value as an IntPtr.
TNode<IntPtrT> SmiUntag(SloppyTNode<Smi> value);
// Smi conversions.
TNode<Float64T> SmiToFloat64(SloppyTNode<Smi> value);
- TNode<Smi> SmiFromWord(SloppyTNode<IntPtrT> value) { return SmiTag(value); }
- TNode<Smi> SmiFromWord32(SloppyTNode<Int32T> value);
- TNode<IntPtrT> SmiToWord(SloppyTNode<Smi> value) { return SmiUntag(value); }
- TNode<Int32T> SmiToWord32(SloppyTNode<Smi> value);
+ TNode<Smi> SmiFromIntPtr(SloppyTNode<IntPtrT> value) { return SmiTag(value); }
+ TNode<Smi> SmiFromInt32(SloppyTNode<Int32T> value);
+ TNode<IntPtrT> SmiToIntPtr(SloppyTNode<Smi> value) { return SmiUntag(value); }
+ TNode<Int32T> SmiToInt32(SloppyTNode<Smi> value);
// Smi operations.
#define SMI_ARITHMETIC_BINOP(SmiOpName, IntPtrOpName) \
@@ -300,10 +301,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* TrySmiDiv(Node* dividend, Node* divisor, Label* bailout);
// Smi | HeapNumber operations.
- Node* NumberInc(Node* value);
- Node* NumberDec(Node* value);
- Node* NumberAdd(Node* a, Node* b);
- Node* NumberSub(Node* a, Node* b);
+ TNode<Number> NumberInc(SloppyTNode<Number> value);
+ TNode<Number> NumberDec(SloppyTNode<Number> value);
+ TNode<Number> NumberAdd(SloppyTNode<Number> a, SloppyTNode<Number> b);
+ TNode<Number> NumberSub(SloppyTNode<Number> a, SloppyTNode<Number> b);
void GotoIfNotNumber(Node* value, Label* is_not_number);
void GotoIfNumber(Node* value, Label* is_number);
@@ -318,8 +319,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* InnerAllocate(Node* previous, Node* offset);
Node* IsRegularHeapObjectSize(Node* size);
+ typedef std::function<void(Label*, Label*)> BranchGenerator;
typedef std::function<Node*()> NodeGenerator;
+ void Assert(const BranchGenerator& branch, const char* message = nullptr,
+ const char* file = nullptr, int line = 0,
+ Node* extra_node1 = nullptr, const char* extra_node1_name = "",
+ Node* extra_node2 = nullptr, const char* extra_node2_name = "",
+ Node* extra_node3 = nullptr, const char* extra_node3_name = "",
+ Node* extra_node4 = nullptr, const char* extra_node4_name = "",
+ Node* extra_node5 = nullptr, const char* extra_node5_name = "");
void Assert(const NodeGenerator& condition_body,
const char* message = nullptr, const char* file = nullptr,
int line = 0, Node* extra_node1 = nullptr,
@@ -328,6 +337,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
const char* extra_node3_name = "", Node* extra_node4 = nullptr,
const char* extra_node4_name = "", Node* extra_node5 = nullptr,
const char* extra_node5_name = "");
+ void Check(const BranchGenerator& branch, const char* message = nullptr,
+ const char* file = nullptr, int line = 0,
+ Node* extra_node1 = nullptr, const char* extra_node1_name = "",
+ Node* extra_node2 = nullptr, const char* extra_node2_name = "",
+ Node* extra_node3 = nullptr, const char* extra_node3_name = "",
+ Node* extra_node4 = nullptr, const char* extra_node4_name = "",
+ Node* extra_node5 = nullptr, const char* extra_node5_name = "");
void Check(const NodeGenerator& condition_body, const char* message = nullptr,
const char* file = nullptr, int line = 0,
Node* extra_node1 = nullptr, const char* extra_node1_name = "",
@@ -341,15 +357,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
template <class A, class F, class G>
TNode<A> Select(SloppyTNode<BoolT> condition, const F& true_body,
const G& false_body, MachineRepresentation rep) {
- return UncheckedCast<A>(
- Select(condition,
- [&]() -> Node* {
- return base::implicit_cast<SloppyTNode<A>>(true_body());
- },
- [&]() -> Node* {
- return base::implicit_cast<SloppyTNode<A>>(false_body());
- },
- rep));
+ return UncheckedCast<A>(Select(
+ condition,
+ [&]() -> Node* { return base::implicit_cast<TNode<A>>(true_body()); },
+ [&]() -> Node* { return base::implicit_cast<TNode<A>>(false_body()); },
+ rep));
}
Node* SelectConstant(Node* condition, Node* true_value, Node* false_value,
@@ -385,7 +397,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Smi::FromInt(false_value));
}
- TNode<Int32T> TruncateWordToWord32(SloppyTNode<IntPtrT> value);
+ TNode<Int32T> TruncateIntPtrToInt32(SloppyTNode<IntPtrT> value);
// Check a value for smi-ness
TNode<BoolT> TaggedIsSmi(SloppyTNode<Object> a);
@@ -497,6 +509,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Int32T> LoadInstanceType(SloppyTNode<HeapObject> object);
// Compare the instance the type of the object against the provided one.
Node* HasInstanceType(Node* object, InstanceType type);
+ // Determines whether Array Iterator's prototype has changed.
+ TNode<BoolT> HasInitialArrayIteratorPrototypeMap(
+ TNode<Context> native_context);
+ // Determines whether Array's prototype has changed.
+ TNode<BoolT> InitialArrayPrototypeHasInitialArrayPrototypeMap(
+ TNode<Context> native_context);
+ // Determines whether an array's elements map has changed.
+ TNode<BoolT> HasInitialFastElementsKindMap(TNode<Context> native_context,
+ TNode<JSArray> jsarray);
Node* DoesntHaveInstanceType(Node* object, InstanceType type);
Node* TaggedDoesntHaveInstanceType(Node* any_tagged, InstanceType type);
// Load the properties backing store of a JSObject.
@@ -607,12 +628,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* LoadDoubleWithHoleCheck(
Node* base, Node* offset, Label* if_hole,
MachineType machine_type = MachineType::Float64());
- Node* LoadFixedTypedArrayElement(
- Node* data_pointer, Node* index_node, ElementsKind elements_kind,
- ParameterMode parameter_mode = INTPTR_PARAMETERS);
+ TNode<RawPtrT> LoadFixedTypedArrayBackingStore(
+ TNode<FixedTypedArrayBase> typed_array);
Node* LoadFixedTypedArrayElementAsTagged(
Node* data_pointer, Node* index_node, ElementsKind elements_kind,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
+ // Parts of the above, factored out for readability:
+ Node* LoadFixedBigInt64ArrayElementAsTagged(Node* data_pointer, Node* offset);
+ Node* LoadFixedBigUint64ArrayElementAsTagged(Node* data_pointer,
+ Node* offset);
// Context manipulation
TNode<Object> LoadContextElement(SloppyTNode<Context> context,
@@ -727,39 +751,59 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Allocate a HeapNumber with a specific value.
TNode<HeapNumber> AllocateHeapNumberWithValue(SloppyTNode<Float64T> value,
MutableMode mode = IMMUTABLE);
+ // Allocate a BigInt with {length} digits. Sets the sign bit to {false}.
+ // Does not initialize the digits.
+ TNode<BigInt> AllocateBigInt(TNode<IntPtrT> length);
+ // Like above, but allowing custom bitfield initialization.
+ TNode<BigInt> AllocateRawBigInt(TNode<IntPtrT> length);
+ void StoreBigIntBitfield(TNode<BigInt> bigint, TNode<WordT> bitfield);
+ void StoreBigIntDigit(TNode<BigInt> bigint, int digit_index,
+ TNode<UintPtrT> digit);
+ TNode<WordT> LoadBigIntBitfield(TNode<BigInt> bigint);
+ TNode<UintPtrT> LoadBigIntDigit(TNode<BigInt> bigint, int digit_index);
// Allocate a SeqOneByteString with the given length.
- Node* AllocateSeqOneByteString(int length, AllocationFlags flags = kNone);
- Node* AllocateSeqOneByteString(Node* context, TNode<Smi> length,
- AllocationFlags flags = kNone);
+ TNode<String> AllocateSeqOneByteString(int length,
+ AllocationFlags flags = kNone);
+ TNode<String> AllocateSeqOneByteString(Node* context, TNode<Smi> length,
+ AllocationFlags flags = kNone);
// Allocate a SeqTwoByteString with the given length.
- Node* AllocateSeqTwoByteString(int length, AllocationFlags flags = kNone);
- Node* AllocateSeqTwoByteString(Node* context, TNode<Smi> length,
- AllocationFlags flags = kNone);
+ TNode<String> AllocateSeqTwoByteString(int length,
+ AllocationFlags flags = kNone);
+ TNode<String> AllocateSeqTwoByteString(Node* context, TNode<Smi> length,
+ AllocationFlags flags = kNone);
// Allocate a SlicedOneByteString with the given length, parent and offset.
// |length| and |offset| are expected to be tagged.
- Node* AllocateSlicedOneByteString(TNode<Smi> length, Node* parent,
- Node* offset);
+
+ TNode<String> AllocateSlicedOneByteString(TNode<Smi> length,
+ TNode<String> parent,
+ TNode<Smi> offset);
// Allocate a SlicedTwoByteString with the given length, parent and offset.
// |length| and |offset| are expected to be tagged.
- Node* AllocateSlicedTwoByteString(TNode<Smi> length, Node* parent,
- Node* offset);
+ TNode<String> AllocateSlicedTwoByteString(TNode<Smi> length,
+ TNode<String> parent,
+ TNode<Smi> offset);
// Allocate a one-byte ConsString with the given length, first and second
// parts. |length| is expected to be tagged, and |first| and |second| are
// expected to be one-byte strings.
- Node* AllocateOneByteConsString(TNode<Smi> length, Node* first, Node* second,
- AllocationFlags flags = kNone);
+ TNode<String> AllocateOneByteConsString(TNode<Smi> length,
+ TNode<String> first,
+ TNode<String> second,
+ AllocationFlags flags = kNone);
// Allocate a two-byte ConsString with the given length, first and second
// parts. |length| is expected to be tagged, and |first| and |second| are
// expected to be two-byte strings.
- Node* AllocateTwoByteConsString(TNode<Smi> length, Node* first, Node* second,
- AllocationFlags flags = kNone);
+ TNode<String> AllocateTwoByteConsString(TNode<Smi> length,
+ TNode<String> first,
+ TNode<String> second,
+ AllocationFlags flags = kNone);
// Allocate an appropriate one- or two-byte ConsString with the first and
// second parts specified by |left| and |right|.
- Node* NewConsString(Node* context, TNode<Smi> length, Node* left, Node* right,
- AllocationFlags flags = kNone);
+ TNode<String> NewConsString(Node* context, TNode<Smi> length,
+ TNode<String> left, TNode<String> right,
+ AllocationFlags flags = kNone);
Node* AllocateNameDictionary(int at_least_space_for);
Node* AllocateNameDictionary(Node* at_least_space_for);
@@ -787,9 +831,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
int start_offset = JSObject::kHeaderSize);
// Allocate a JSArray without elements and initialize the header fields.
- Node* AllocateUninitializedJSArrayWithoutElements(Node* array_map,
- Node* length,
- Node* allocation_site);
+ Node* AllocateUninitializedJSArrayWithoutElements(
+ Node* array_map, Node* length, Node* allocation_site = nullptr);
// Allocate and return a JSArray with initialized header fields and its
// uninitialized elements.
// The ParameterMode argument is only used for the capacity parameter.
@@ -827,9 +870,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* AllocateJSIteratorResult(Node* context, Node* value, Node* done);
Node* AllocateJSIteratorResultForEntry(Node* context, Node* key, Node* value);
- Node* TypedArraySpeciesCreateByLength(Node* context, Node* originalArray,
- Node* len);
-
void FillFixedArrayWithValue(ElementsKind kind, Node* array, Node* from_index,
Node* to_index,
Heap::RootListIndex value_root_index,
@@ -1029,10 +1069,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* ToThisValue(Node* context, Node* value, PrimitiveType primitive_type,
char const* method_name);
- // Throws a TypeError for {method_name}. Terminates the current block.
- void ThrowIncompatibleMethodReceiver(Node* context, char const* method_name,
- Node* receiver);
-
// Throws a TypeError for {method_name} if {value} is not of the given
// instance type. Returns {value}'s map.
Node* ThrowIfNotInstanceType(Node* context, Node* value,
@@ -1068,6 +1104,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* IsCallableMap(Node* map);
Node* IsCallable(Node* object);
Node* IsCell(Node* object);
+ Node* IsCode(Node* object);
Node* IsConsStringInstanceType(Node* instance_type);
Node* IsConstructorMap(Node* map);
Node* IsConstructor(Node* object);
@@ -1077,6 +1114,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* IsExternalStringInstanceType(Node* instance_type);
TNode<BoolT> IsFastJSArray(SloppyTNode<Object> object,
SloppyTNode<Context> context);
+ TNode<BoolT> IsFastJSArrayWithNoCustomIteration(
+ TNode<Object> object, TNode<Context> context,
+ TNode<Context> native_context);
+ Node* IsFeedbackCell(Node* object);
Node* IsFeedbackVector(Node* object);
Node* IsFixedArray(Node* object);
Node* IsFixedArraySubclass(Node* object);
@@ -1092,14 +1133,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* IsJSArrayInstanceType(Node* instance_type);
Node* IsJSArrayMap(Node* object);
Node* IsJSArray(Node* object);
+ Node* IsJSAsyncGeneratorObject(Node* object);
Node* IsJSFunctionInstanceType(Node* instance_type);
Node* IsJSFunctionMap(Node* object);
Node* IsJSFunction(Node* object);
+ Node* IsJSGeneratorObject(Node* object);
Node* IsJSGlobalProxyInstanceType(Node* instance_type);
Node* IsJSGlobalProxy(Node* object);
Node* IsJSObjectInstanceType(Node* instance_type);
Node* IsJSObjectMap(Node* map);
Node* IsJSObject(Node* object);
+ Node* IsJSPromiseMap(Node* map);
+ Node* IsJSPromise(Node* object);
Node* IsJSProxy(Node* object);
Node* IsJSReceiverInstanceType(Node* instance_type);
Node* IsJSReceiverMap(Node* map);
@@ -1119,14 +1164,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* IsOneByteStringInstanceType(Node* instance_type);
Node* IsPrimitiveInstanceType(Node* instance_type);
Node* IsPrivateSymbol(Node* object);
+ Node* IsPromiseCapability(Node* object);
Node* IsPropertyArray(Node* object);
Node* IsPropertyCell(Node* object);
Node* IsPrototypeInitialArrayPrototype(Node* context, Node* map);
+ TNode<BoolT> IsPrototypeTypedArrayPrototype(SloppyTNode<Context> context,
+ SloppyTNode<Map> map);
Node* IsSequentialStringInstanceType(Node* instance_type);
Node* IsShortExternalStringInstanceType(Node* instance_type);
Node* IsSpecialReceiverInstanceType(Node* instance_type);
Node* IsSpecialReceiverMap(Node* map);
- Node* IsSpeciesProtectorCellInvalid();
Node* IsStringInstanceType(Node* instance_type);
Node* IsString(Node* object);
Node* IsSymbolInstanceType(Node* instance_type);
@@ -1139,6 +1186,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
return IsSharedFunctionInfoMap(LoadMap(object));
}
+ Node* IsPromiseThenProtectorCellInvalid();
+ Node* IsSpeciesProtectorCellInvalid();
+
// True iff |object| is a Smi or a HeapNumber.
Node* IsNumber(Node* object);
// True iff |object| is a Smi or a HeapNumber or a BigInt.
@@ -1170,18 +1220,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Return the single character string with only {code}.
TNode<String> StringFromCharCode(TNode<Int32T> code);
- enum class SubStringFlags { NONE, FROM_TO_ARE_BOUNDED };
-
// Return a new string object which holds a substring containing the range
// [from,to[ of string. |from| and |to| are expected to be tagged.
- // If flags has the value FROM_TO_ARE_BOUNDED then from and to are in
- // the range [0, string-length)
- Node* SubString(Node* context, Node* string, Node* from, Node* to,
- SubStringFlags flags = SubStringFlags::NONE);
+ TNode<String> SubString(TNode<String> string, TNode<IntPtrT> from,
+ TNode<IntPtrT> to);
// Return a new string object produced by concatenating |first| with |second|.
- Node* StringAdd(Node* context, Node* first, Node* second,
- AllocationFlags flags = kNone);
+ TNode<String> StringAdd(Node* context, TNode<String> first,
+ TNode<String> second, AllocationFlags flags = kNone);
// Check if |string| is an indirect (thin or flat cons) string type that can
// be dereferenced by DerefIndirectString.
@@ -1205,9 +1251,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Type conversion helpers.
enum class BigIntHandling { kConvertToNumber, kThrow };
// Convert a String to a Number.
- TNode<Number> StringToNumber(SloppyTNode<String> input);
+ TNode<Number> StringToNumber(TNode<String> input);
// Convert a Number to a String.
- Node* NumberToString(Node* input);
+ TNode<String> NumberToString(TNode<Number> input);
// Convert an object to a name.
Node* ToName(Node* context, Node* input);
// Convert a Non-Number object to a Number.
@@ -1224,6 +1270,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Number> ToNumber(
SloppyTNode<Context> context, SloppyTNode<Object> input,
BigIntHandling bigint_handling = BigIntHandling::kThrow);
+ TNode<Number> ToNumber_Inline(SloppyTNode<Context> context,
+ SloppyTNode<Object> input);
+
+ // Try to convert an object to a BigInt. Throws on failure (e.g. for Numbers).
+ // https://tc39.github.io/proposal-bigint/#sec-to-bigint
+ TNode<BigInt> ToBigInt(SloppyTNode<Context> context,
+ SloppyTNode<Object> input);
// Converts |input| to one of 2^32 integer values in the range 0 through
// 2^32-1, inclusive.
@@ -1240,16 +1293,21 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Convert any object to a Primitive.
Node* JSReceiverToPrimitive(Node* context, Node* input);
+ TNode<JSReceiver> ToObject(SloppyTNode<Context> context,
+ SloppyTNode<Object> input);
+
enum ToIntegerTruncationMode {
kNoTruncation,
kTruncateMinusZero,
};
// ES6 7.1.17 ToIndex, but jumps to range_error if the result is not a Smi.
- Node* ToSmiIndex(Node* const input, Node* const context, Label* range_error);
+ TNode<Smi> ToSmiIndex(TNode<Object> input, TNode<Context> context,
+ Label* range_error);
// ES6 7.1.15 ToLength, but jumps to range_error if the result is not a Smi.
- Node* ToSmiLength(Node* input, Node* const context, Label* range_error);
+ TNode<Smi> ToSmiLength(TNode<Object> input, TNode<Context> context,
+ Label* range_error);
// ES6 7.1.15 ToLength, but with inlined fast path.
Node* ToLength_Inline(Node* const context, Node* const input);
@@ -1287,7 +1345,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
template <typename BitField>
TNode<Uint32T> DecodeWord32FromWord(SloppyTNode<WordT> word) {
return UncheckedCast<Uint32T>(
- TruncateWordToWord32(Signed(DecodeWord<BitField>(word))));
+ TruncateIntPtrToInt32(Signed(DecodeWord<BitField>(word))));
}
// Decodes an unsigned (!) value from |word32| to an uint32 node.
@@ -1554,36 +1612,47 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Label* if_not_found, Label* if_bailout,
GetOwnPropertyMode mode);
- Node* GetProperty(Node* context, Node* receiver, Handle<Name> name) {
+ TNode<Object> GetProperty(SloppyTNode<Context> context,
+ SloppyTNode<Object> receiver, Handle<Name> name) {
return GetProperty(context, receiver, HeapConstant(name));
}
- Node* GetProperty(Node* context, Node* receiver, Node* const name) {
- return CallStub(CodeFactory::GetProperty(isolate()), context, receiver,
- name);
+ TNode<Object> GetProperty(SloppyTNode<Context> context,
+ SloppyTNode<Object> receiver,
+ SloppyTNode<Object> name) {
+ return UncheckedCast<Object>(
+ CallStub(CodeFactory::GetProperty(isolate()), context, receiver, name));
}
Node* GetMethod(Node* context, Node* object, Handle<Name> name,
Label* if_null_or_undefined);
template <class... TArgs>
- Node* CallBuiltin(Builtins::Name id, Node* context, TArgs... args) {
+ TNode<Object> CallBuiltin(Builtins::Name id, SloppyTNode<Context> context,
+ TArgs... args) {
DCHECK_IMPLIES(Builtins::KindOf(id) == Builtins::TFJ,
!Builtins::IsLazy(id));
- return CallStub(Builtins::CallableFor(isolate(), id), context, args...);
+ return UncheckedCast<Object>(
+ CallStub(Builtins::CallableFor(isolate(), id), context, args...));
}
template <class... TArgs>
- Node* TailCallBuiltin(Builtins::Name id, Node* context, TArgs... args) {
+ TNode<Object> TailCallBuiltin(Builtins::Name id, SloppyTNode<Context> context,
+ TArgs... args) {
DCHECK_IMPLIES(Builtins::KindOf(id) == Builtins::TFJ,
!Builtins::IsLazy(id));
- return TailCallStub(Builtins::CallableFor(isolate(), id), context, args...);
+ return UncheckedCast<Object>(
+ TailCallStub(Builtins::CallableFor(isolate(), id), context, args...));
}
void LoadPropertyFromFastObject(Node* object, Node* map, Node* descriptors,
Node* name_index, Variable* var_details,
Variable* var_value);
+ void LoadPropertyFromFastObject(Node* object, Node* map, Node* descriptors,
+ Node* name_index, Node* details,
+ Variable* var_value);
+
void LoadPropertyFromNameDictionary(Node* dictionary, Node* entry,
Variable* var_details,
Variable* var_value);
@@ -1703,20 +1772,30 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* Int32ToUint8Clamped(Node* int32_value);
Node* Float64ToUint8Clamped(Node* float64_value);
- Node* PrepareValueForWriteToTypedArray(Node* key, ElementsKind elements_kind,
- Label* bailout);
+ Node* PrepareValueForWriteToTypedArray(TNode<Object> input,
+ ElementsKind elements_kind,
+ TNode<Context> context);
// Store value to an elements array with given elements kind.
void StoreElement(Node* elements, ElementsKind kind, Node* index, Node* value,
ParameterMode mode);
+ void EmitBigTypedArrayElementStore(TNode<JSTypedArray> object,
+ TNode<FixedTypedArrayBase> elements,
+ TNode<IntPtrT> intptr_key,
+ TNode<Object> value,
+ TNode<Context> context,
+ Label* opt_if_neutered);
+
void EmitElementStore(Node* object, Node* key, Node* value, bool is_jsarray,
ElementsKind elements_kind,
- KeyedAccessStoreMode store_mode, Label* bailout);
+ KeyedAccessStoreMode store_mode, Label* bailout,
+ Node* context);
Node* CheckForCapacityGrow(Node* object, Node* elements, ElementsKind kind,
- Node* length, Node* key, ParameterMode mode,
- bool is_js_array, Label* bailout);
+ KeyedAccessStoreMode store_mode, Node* length,
+ Node* key, ParameterMode mode, bool is_js_array,
+ Label* bailout);
Node* CopyElementsOnWrite(Node* object, Node* elements, ElementsKind kind,
Node* length, ParameterMode mode, Label* bailout);
@@ -1833,12 +1912,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
SloppyTNode<Context> context,
HasPropertyLookupMode mode);
- Node* ClassOf(Node* object);
-
Node* Typeof(Node* value);
Node* GetSuperConstructor(Node* value, Node* context);
+ Node* SpeciesConstructor(Node* context, Node* object,
+ Node* default_constructor);
+
Node* InstanceOf(Node* object, Node* callable, Node* context);
// Debug helpers
@@ -1856,11 +1936,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Promise helpers
Node* IsPromiseHookEnabledOrDebugIsActive();
- Node* AllocatePromiseReactionJobInfo(Node* value, Node* tasks,
- Node* deferred_promise,
- Node* deferred_on_resolve,
- Node* deferred_on_reject, Node* context);
-
// Helpers for StackFrame markers.
Node* MarkerIsFrameType(Node* marker_or_function,
StackFrame::Type frame_type);
@@ -1906,17 +1981,21 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void DescriptorLookupBinary(Node* unique_name, Node* descriptors, Node* nof,
Label* if_found, Variable* var_name_index,
Label* if_not_found);
+ Node* DescriptorNumberToIndex(SloppyTNode<Uint32T> descriptor_number);
// Implements DescriptorArray::ToKeyIndex.
// Returns an untagged IntPtr.
Node* DescriptorArrayToKeyIndex(Node* descriptor_number);
// Implements DescriptorArray::GetKey.
Node* DescriptorArrayGetKey(Node* descriptors, Node* descriptor_number);
+ // Implements DescriptorArray::GetDetails.
+ TNode<Uint32T> DescriptorArrayGetDetails(TNode<DescriptorArray> descriptors,
+ TNode<Uint32T> descriptor_number);
Node* CallGetterIfAccessor(Node* value, Node* details, Node* context,
Node* receiver, Label* if_bailout,
GetOwnPropertyMode mode = kCallJSGetter);
- Node* TryToIntptr(Node* key, Label* miss);
+ TNode<IntPtrT> TryToIntptr(Node* key, Label* miss);
void BranchIfPrototypesHaveNoElements(Node* receiver_map,
Label* definitely_no_elements,
@@ -1949,12 +2028,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* EmitKeyedSloppyArguments(Node* receiver, Node* key, Node* value,
Label* bailout);
- Node* AllocateSlicedString(Heap::RootListIndex map_root_index,
- TNode<Smi> length, Node* parent, Node* offset);
+ TNode<String> AllocateSlicedString(Heap::RootListIndex map_root_index,
+ TNode<Smi> length, TNode<String> parent,
+ TNode<Smi> offset);
- Node* AllocateConsString(Heap::RootListIndex map_root_index,
- TNode<Smi> length, Node* first, Node* second,
- AllocationFlags flags);
+ TNode<String> AllocateConsString(Heap::RootListIndex map_root_index,
+ TNode<Smi> length, TNode<String> first,
+ TNode<String> second, AllocationFlags flags);
// Implements DescriptorArray::number_of_entries.
// Returns an untagged int32.
@@ -1967,10 +2047,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* CollectFeedbackForString(Node* instance_type);
void GenerateEqual_Same(Node* value, Label* if_equal, Label* if_notequal,
Variable* var_type_feedback = nullptr);
- Node* AllocAndCopyStringCharacters(Node* context, Node* from,
- Node* from_instance_type,
- TNode<IntPtrT> from_index,
- TNode<Smi> character_count);
+ TNode<String> AllocAndCopyStringCharacters(Node* from,
+ Node* from_instance_type,
+ TNode<IntPtrT> from_index,
+ TNode<Smi> character_count);
static const int kElementLoopUnrollThreshold = 8;
@@ -2084,7 +2164,7 @@ class ToDirectStringAssembler : public CodeStubAssembler {
// string. The result can be either a sequential or external string.
  // Jumps to if_bailout if the string is indirect and cannot
  // be unpacked.
- Node* TryToDirect(Label* if_bailout);
+ TNode<String> TryToDirect(Label* if_bailout);
// Returns a pointer to the beginning of the string data.
// Jumps to if_bailout if the external string cannot be unpacked.
@@ -2100,7 +2180,9 @@ class ToDirectStringAssembler : public CodeStubAssembler {
Node* string() { return var_string_.value(); }
Node* instance_type() { return var_instance_type_.value(); }
- Node* offset() { return var_offset_.value(); }
+ TNode<IntPtrT> offset() {
+ return UncheckedCast<IntPtrT>(var_offset_.value());
+ }
Node* is_external() { return var_is_external_.value(); }
private:
@@ -2141,31 +2223,39 @@ class ToDirectStringAssembler : public CodeStubAssembler {
CSA_ASSERT_STRINGIFY_EXTRA_VALUES_5(__VA_ARGS__, nullptr, nullptr, nullptr, \
nullptr, nullptr)
-#define CSA_ASSERT_GET_CONDITION(x, ...) (x)
-#define CSA_ASSERT_GET_CONDITION_STR(x, ...) #x
+#define CSA_ASSERT_GET_FIRST(x, ...) (x)
+#define CSA_ASSERT_GET_FIRST_STR(x, ...) #x
// CSA_ASSERT(csa, <condition>, <extra values to print...>)
// We have to jump through some hoops to allow <extra values to print...> to be
// empty.
-#define CSA_ASSERT(csa, ...) \
- (csa)->Assert( \
- [&]() -> compiler::Node* { \
- return base::implicit_cast<compiler::SloppyTNode<Word32T>>( \
- EXPAND(CSA_ASSERT_GET_CONDITION(__VA_ARGS__))); \
- }, \
- EXPAND(CSA_ASSERT_GET_CONDITION_STR(__VA_ARGS__)), __FILE__, __LINE__, \
+#define CSA_ASSERT(csa, ...) \
+ (csa)->Assert( \
+ [&]() -> compiler::Node* { \
+ return base::implicit_cast<compiler::SloppyTNode<Word32T>>( \
+ EXPAND(CSA_ASSERT_GET_FIRST(__VA_ARGS__))); \
+ }, \
+ EXPAND(CSA_ASSERT_GET_FIRST_STR(__VA_ARGS__)), __FILE__, __LINE__, \
CSA_ASSERT_STRINGIFY_EXTRA_VALUES(__VA_ARGS__))
-#define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected) \
- (csa)->Assert( \
- [&]() -> compiler::Node* { \
- compiler::Node* const argc = \
- (csa)->Parameter(Descriptor::kActualArgumentsCount); \
- return (csa)->Op(argc, (csa)->Int32Constant(expected)); \
- }, \
- "argc " #op " " #expected, __FILE__, __LINE__, \
- SmiFromWord32((csa)->Parameter(Descriptor::kActualArgumentsCount)), \
+// CSA_ASSERT_BRANCH(csa, [](Label* ok, Label* not_ok) {...},
+// <extra values to print...>)
+
+#define CSA_ASSERT_BRANCH(csa, ...) \
+ (csa)->Assert(EXPAND(CSA_ASSERT_GET_FIRST(__VA_ARGS__)), \
+ EXPAND(CSA_ASSERT_GET_FIRST_STR(__VA_ARGS__)), __FILE__, \
+ __LINE__, CSA_ASSERT_STRINGIFY_EXTRA_VALUES(__VA_ARGS__))
+
+#define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected) \
+ (csa)->Assert( \
+ [&]() -> compiler::Node* { \
+ compiler::Node* const argc = \
+ (csa)->Parameter(Descriptor::kActualArgumentsCount); \
+ return (csa)->Op(argc, (csa)->Int32Constant(expected)); \
+ }, \
+ "argc " #op " " #expected, __FILE__, __LINE__, \
+ SmiFromInt32((csa)->Parameter(Descriptor::kActualArgumentsCount)), \
"argc")
#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) \
@@ -2182,6 +2272,7 @@ class ToDirectStringAssembler : public CodeStubAssembler {
TVariable<type> name(CSA_DEBUG_INFO(name), __VA_ARGS__)
#else // DEBUG
#define CSA_ASSERT(csa, ...) ((void)0)
+#define CSA_ASSERT_BRANCH(csa, ...) ((void)0)
#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) ((void)0)
#define BIND(label) Bind(label)
#define VARIABLE(name, ...) Variable name(this, __VA_ARGS__)
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 2b98a5bfc7..cfe16d268c 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -71,9 +71,9 @@ void CodeStubDescriptor::Initialize(Register stack_parameter_count,
bool CodeStub::FindCodeInCache(Code** code_out) {
- NumberDictionary* stubs = isolate()->heap()->code_stubs();
+ SimpleNumberDictionary* stubs = isolate()->heap()->code_stubs();
int index = stubs->FindEntry(isolate(), GetKey());
- if (index != NumberDictionary::kNotFound) {
+ if (index != SimpleNumberDictionary::kNotFound) {
*code_out = Code::cast(stubs->ValueAt(index));
return true;
}
@@ -97,10 +97,10 @@ void CodeStub::RecordCodeGeneration(Handle<Code> code) {
void CodeStub::DeleteStubFromCacheForTesting() {
Heap* heap = isolate_->heap();
- Handle<NumberDictionary> dict(heap->code_stubs());
+ Handle<SimpleNumberDictionary> dict(heap->code_stubs());
int entry = dict->FindEntry(GetKey());
- DCHECK_NE(NumberDictionary::kNotFound, entry);
- dict = NumberDictionary::DeleteEntry(dict, entry);
+ DCHECK_NE(SimpleNumberDictionary::kNotFound, entry);
+ dict = SimpleNumberDictionary::DeleteEntry(dict, entry);
heap->SetRootCodeStubs(*dict);
}
@@ -121,17 +121,17 @@ Handle<Code> PlatformCodeStub::GenerateCode() {
Generate(&masm);
}
- // Allocate the handler table.
- Handle<HandlerTable> table = GenerateHandlerTable();
+ // Generate the handler table.
+ int handler_table_offset = GenerateHandlerTable(&masm);
// Create the code object.
CodeDesc desc;
masm.GetCode(isolate(), &desc);
// Copy the generated code into a heap object.
Handle<Code> new_object = factory->NewCode(
- desc, Code::STUB, masm.CodeObject(), Builtins::kNoBuiltinId, table,
+ desc, Code::STUB, masm.CodeObject(), Builtins::kNoBuiltinId,
MaybeHandle<ByteArray>(), DeoptimizationData::Empty(isolate()),
- NeedsImmovableCode(), GetKey());
+ NeedsImmovableCode(), GetKey(), false, 0, 0, handler_table_offset);
return new_object;
}
@@ -166,8 +166,8 @@ Handle<Code> CodeStub::GetCode() {
#endif
// Update the dictionary and the root in Heap.
- Handle<NumberDictionary> dict =
- NumberDictionary::Set(handle(heap->code_stubs()), GetKey(), new_object);
+ Handle<SimpleNumberDictionary> dict = SimpleNumberDictionary::Set(
+ handle(heap->code_stubs()), GetKey(), new_object);
heap->SetRootCodeStubs(*dict);
code = *new_object;
}
@@ -225,9 +225,7 @@ void CodeStub::Dispatch(Isolate* isolate, uint32_t key, void** value_out,
}
}
-Handle<HandlerTable> PlatformCodeStub::GenerateHandlerTable() {
- return HandlerTable::Empty(isolate());
-}
+int PlatformCodeStub::GenerateHandlerTable(MacroAssembler* masm) { return 0; }
static void InitializeDescriptorDispatchedCall(CodeStub* stub,
void** value_out) {
@@ -289,7 +287,7 @@ TF_STUB(StringAddStub, CodeStubAssembler) {
CodeStubAssembler::AllocationFlag allocation_flags =
(pretenure_flag == TENURED) ? CodeStubAssembler::kPretenured
: CodeStubAssembler::kNone;
- Return(StringAdd(context, left, right, allocation_flags));
+ Return(StringAdd(context, CAST(left), CAST(right), allocation_flags));
} else {
Callable callable = CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE,
pretenure_flag);
@@ -332,7 +330,7 @@ TF_STUB(ElementsTransitionAndStoreStub, CodeStubAssembler) {
TransitionElementsKind(receiver, map, stub->from_kind(), stub->to_kind(),
stub->is_jsarray(), &miss);
EmitElementStore(receiver, key, value, stub->is_jsarray(), stub->to_kind(),
- stub->store_mode(), &miss);
+ stub->store_mode(), &miss, context);
Return(value);
}
@@ -434,11 +432,10 @@ TF_STUB(LoadIndexedInterceptorStub, CodeStubAssembler) {
vector);
}
-Handle<HandlerTable> JSEntryStub::GenerateHandlerTable() {
- Handle<FixedArray> handler_table =
- isolate()->factory()->NewFixedArray(1, TENURED);
- handler_table->set(0, Smi::FromInt(handler_offset_));
- return Handle<HandlerTable>::cast(handler_table);
+int JSEntryStub::GenerateHandlerTable(MacroAssembler* masm) {
+ int handler_table_offset = HandlerTable::EmitReturnTableStart(masm, 1);
+ HandlerTable::EmitReturnEntry(masm, 0, handler_offset_);
+ return handler_table_offset;
}
@@ -524,7 +521,7 @@ TF_STUB(StoreFastElementStub, CodeStubAssembler) {
Label miss(this);
EmitElementStore(receiver, key, value, stub->is_js_array(),
- stub->elements_kind(), stub->store_mode(), &miss);
+ stub->elements_kind(), stub->store_mode(), &miss, context);
Return(value);
BIND(&miss);
@@ -541,12 +538,13 @@ void StoreFastElementStub::GenerateAheadOfTime(Isolate* isolate) {
StoreFastElementStub(isolate, false, HOLEY_ELEMENTS, STANDARD_STORE)
.GetCode();
StoreFastElementStub(isolate, false, HOLEY_ELEMENTS,
- STORE_AND_GROW_NO_TRANSITION)
+ STORE_AND_GROW_NO_TRANSITION_HANDLE_COW)
.GetCode();
for (int i = FIRST_FAST_ELEMENTS_KIND; i <= LAST_FAST_ELEMENTS_KIND; i++) {
ElementsKind kind = static_cast<ElementsKind>(i);
StoreFastElementStub(isolate, true, kind, STANDARD_STORE).GetCode();
- StoreFastElementStub(isolate, true, kind, STORE_AND_GROW_NO_TRANSITION)
+ StoreFastElementStub(isolate, true, kind,
+ STORE_AND_GROW_NO_TRANSITION_HANDLE_COW)
.GetCode();
}
}
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 751a89fdbd..6d35af1100 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -288,7 +288,7 @@ class PlatformCodeStub : public CodeStub {
virtual void Generate(MacroAssembler* masm) = 0;
// Generates the exception handler table for the stub.
- virtual Handle<HandlerTable> GenerateHandlerTable();
+ virtual int GenerateHandlerTable(MacroAssembler* masm);
DEFINE_CODE_STUB_BASE(PlatformCodeStub, CodeStub);
};
@@ -692,7 +692,7 @@ class JSEntryStub : public PlatformCodeStub {
}
private:
- Handle<HandlerTable> GenerateHandlerTable() override;
+ int GenerateHandlerTable(MacroAssembler* masm) override;
void PrintName(std::ostream& os) const override { // NOLINT
os << (type() == StackFrame::ENTRY ? "JSEntryStub"
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index 927e09a940..046f692c07 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -69,7 +69,7 @@ void CompilationSubCache::Age() {
}
void CompilationSubCache::Iterate(RootVisitor* v) {
- v->VisitRootPointers(Root::kCompilationCache, &tables_[0],
+ v->VisitRootPointers(Root::kCompilationCache, nullptr, &tables_[0],
&tables_[generations_]);
}
@@ -123,11 +123,11 @@ bool CompilationCacheScript::HasOrigin(Handle<SharedFunctionInfo> function_info,
// be cached in the same script generation. Currently the first use
// will be cached, but subsequent code from different source / line
// won't.
-InfoVectorPair CompilationCacheScript::Lookup(
+MaybeHandle<SharedFunctionInfo> CompilationCacheScript::Lookup(
Handle<String> source, MaybeHandle<Object> name, int line_offset,
int column_offset, ScriptOriginOptions resource_options,
Handle<Context> context, LanguageMode language_mode) {
- InfoVectorPair result;
+ MaybeHandle<SharedFunctionInfo> result;
// Probe the script generation tables. Make sure not to leak handles
// into the caller's handle scope.
@@ -135,19 +135,15 @@ InfoVectorPair CompilationCacheScript::Lookup(
const int generation = 0;
DCHECK_EQ(generations(), 1);
Handle<CompilationCacheTable> table = GetTable(generation);
- InfoVectorPair probe = table->LookupScript(source, context, language_mode);
- if (probe.has_shared()) {
- Handle<SharedFunctionInfo> function_info(probe.shared(), isolate());
- Handle<Cell> vector_handle;
- if (probe.has_vector()) {
- vector_handle = Handle<Cell>(probe.vector(), isolate());
- }
+ MaybeHandle<SharedFunctionInfo> probe =
+ table->LookupScript(source, context, language_mode);
+ Handle<SharedFunctionInfo> function_info;
+ if (probe.ToHandle(&function_info)) {
// Break when we've found a suitable shared function info that
// matches the origin.
if (HasOrigin(function_info, name, line_offset, column_offset,
resource_options)) {
- result = InfoVectorPair(*function_info,
- probe.has_vector() ? *vector_handle : nullptr);
+ result = scope.CloseAndEscape(function_info);
}
}
}
@@ -155,19 +151,13 @@ InfoVectorPair CompilationCacheScript::Lookup(
// Once outside the manacles of the handle scope, we need to recheck
// to see if we actually found a cached script. If so, we return a
// handle created in the caller's handle scope.
- if (result.has_shared()) {
+ Handle<SharedFunctionInfo> function_info;
+ if (result.ToHandle(&function_info)) {
#ifdef DEBUG
// Since HasOrigin can allocate, we need to protect the SharedFunctionInfo
- // and the FeedbackVector with handles during the call.
- Handle<SharedFunctionInfo> shared(result.shared(), isolate());
- Handle<Cell> vector_handle;
- if (result.has_vector()) {
- vector_handle = Handle<Cell>(result.vector(), isolate());
- }
- DCHECK(
- HasOrigin(shared, name, line_offset, column_offset, resource_options));
- result =
- InfoVectorPair(*shared, result.has_vector() ? *vector_handle : nullptr);
+ // with handles during the call.
+ DCHECK(HasOrigin(function_info, name, line_offset, column_offset,
+ resource_options));
#endif
isolate()->counters()->compilation_cache_hits()->Increment();
} else {
@@ -178,22 +168,23 @@ InfoVectorPair CompilationCacheScript::Lookup(
void CompilationCacheScript::Put(Handle<String> source, Handle<Context> context,
LanguageMode language_mode,
- Handle<SharedFunctionInfo> function_info,
- Handle<Cell> literals) {
+ Handle<SharedFunctionInfo> function_info) {
HandleScope scope(isolate());
Handle<CompilationCacheTable> table = GetFirstTable();
- SetFirstTable(CompilationCacheTable::PutScript(
- table, source, context, language_mode, function_info, literals));
+ SetFirstTable(CompilationCacheTable::PutScript(table, source, context,
+ language_mode, function_info));
}
-InfoVectorPair CompilationCacheEval::Lookup(
- Handle<String> source, Handle<SharedFunctionInfo> outer_info,
- Handle<Context> native_context, LanguageMode language_mode, int position) {
+InfoCellPair CompilationCacheEval::Lookup(Handle<String> source,
+ Handle<SharedFunctionInfo> outer_info,
+ Handle<Context> native_context,
+ LanguageMode language_mode,
+ int position) {
HandleScope scope(isolate());
// Make sure not to leak the table into the surrounding handle
// scope. Otherwise, we risk keeping old tables around even after
// having cleared the cache.
- InfoVectorPair result;
+ InfoCellPair result;
const int generation = 0;
DCHECK_EQ(generations(), 1);
Handle<CompilationCacheTable> table = GetTable(generation);
@@ -211,12 +202,13 @@ void CompilationCacheEval::Put(Handle<String> source,
Handle<SharedFunctionInfo> outer_info,
Handle<SharedFunctionInfo> function_info,
Handle<Context> native_context,
- Handle<Cell> literals, int position) {
+ Handle<FeedbackCell> feedback_cell,
+ int position) {
HandleScope scope(isolate());
Handle<CompilationCacheTable> table = GetFirstTable();
table =
CompilationCacheTable::PutEval(table, source, outer_info, function_info,
- native_context, literals, position);
+ native_context, feedback_cell, position);
SetFirstTable(table);
}
@@ -263,21 +255,22 @@ void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
script_.Remove(function_info);
}
-InfoVectorPair CompilationCache::LookupScript(
+MaybeHandle<SharedFunctionInfo> CompilationCache::LookupScript(
Handle<String> source, MaybeHandle<Object> name, int line_offset,
int column_offset, ScriptOriginOptions resource_options,
Handle<Context> context, LanguageMode language_mode) {
- InfoVectorPair empty_result;
- if (!IsEnabled()) return empty_result;
+ if (!IsEnabled()) return MaybeHandle<SharedFunctionInfo>();
return script_.Lookup(source, name, line_offset, column_offset,
resource_options, context, language_mode);
}
-InfoVectorPair CompilationCache::LookupEval(
- Handle<String> source, Handle<SharedFunctionInfo> outer_info,
- Handle<Context> context, LanguageMode language_mode, int position) {
- InfoVectorPair result;
+InfoCellPair CompilationCache::LookupEval(Handle<String> source,
+ Handle<SharedFunctionInfo> outer_info,
+ Handle<Context> context,
+ LanguageMode language_mode,
+ int position) {
+ InfoCellPair result;
if (!IsEnabled()) return result;
if (context->IsNativeContext()) {
@@ -302,29 +295,29 @@ MaybeHandle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
void CompilationCache::PutScript(Handle<String> source, Handle<Context> context,
LanguageMode language_mode,
- Handle<SharedFunctionInfo> function_info,
- Handle<Cell> literals) {
+ Handle<SharedFunctionInfo> function_info) {
if (!IsEnabled()) return;
- script_.Put(source, context, language_mode, function_info, literals);
+ script_.Put(source, context, language_mode, function_info);
}
void CompilationCache::PutEval(Handle<String> source,
Handle<SharedFunctionInfo> outer_info,
Handle<Context> context,
Handle<SharedFunctionInfo> function_info,
- Handle<Cell> literals, int position) {
+ Handle<FeedbackCell> feedback_cell,
+ int position) {
if (!IsEnabled()) return;
HandleScope scope(isolate());
if (context->IsNativeContext()) {
- eval_global_.Put(source, outer_info, function_info, context, literals,
+ eval_global_.Put(source, outer_info, function_info, context, feedback_cell,
position);
} else {
DCHECK_NE(position, kNoSourcePosition);
Handle<Context> native_context(context->native_context(), isolate());
eval_contextual_.Put(source, outer_info, function_info, native_context,
- literals, position);
+ feedback_cell, position);
}
}
diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h
index 3c9751ac2f..0072d3b487 100644
--- a/deps/v8/src/compilation-cache.h
+++ b/deps/v8/src/compilation-cache.h
@@ -79,14 +79,16 @@ class CompilationCacheScript : public CompilationSubCache {
public:
explicit CompilationCacheScript(Isolate* isolate);
- InfoVectorPair Lookup(Handle<String> source, MaybeHandle<Object> name,
- int line_offset, int column_offset,
- ScriptOriginOptions resource_options,
- Handle<Context> context, LanguageMode language_mode);
+ MaybeHandle<SharedFunctionInfo> Lookup(Handle<String> source,
+ MaybeHandle<Object> name,
+ int line_offset, int column_offset,
+ ScriptOriginOptions resource_options,
+ Handle<Context> context,
+ LanguageMode language_mode);
void Put(Handle<String> source, Handle<Context> context,
- LanguageMode language_mode, Handle<SharedFunctionInfo> function_info,
- Handle<Cell> literals);
+ LanguageMode language_mode,
+ Handle<SharedFunctionInfo> function_info);
private:
bool HasOrigin(Handle<SharedFunctionInfo> function_info,
@@ -114,14 +116,15 @@ class CompilationCacheEval: public CompilationSubCache {
explicit CompilationCacheEval(Isolate* isolate)
: CompilationSubCache(isolate, 1) {}
- InfoVectorPair Lookup(Handle<String> source,
- Handle<SharedFunctionInfo> outer_info,
- Handle<Context> native_context,
- LanguageMode language_mode, int position);
+ InfoCellPair Lookup(Handle<String> source,
+ Handle<SharedFunctionInfo> outer_info,
+ Handle<Context> native_context,
+ LanguageMode language_mode, int position);
void Put(Handle<String> source, Handle<SharedFunctionInfo> outer_info,
Handle<SharedFunctionInfo> function_info,
- Handle<Context> native_context, Handle<Cell> literals, int position);
+ Handle<Context> native_context, Handle<FeedbackCell> feedback_cell,
+ int position);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
@@ -152,19 +155,18 @@ class CompilationCache {
// Finds the script shared function info for a source
// string. Returns an empty handle if the cache doesn't contain a
// script for the given source string with the right origin.
- InfoVectorPair LookupScript(Handle<String> source, MaybeHandle<Object> name,
- int line_offset, int column_offset,
- ScriptOriginOptions resource_options,
- Handle<Context> context,
- LanguageMode language_mode);
+ MaybeHandle<SharedFunctionInfo> LookupScript(
+ Handle<String> source, MaybeHandle<Object> name, int line_offset,
+ int column_offset, ScriptOriginOptions resource_options,
+ Handle<Context> context, LanguageMode language_mode);
// Finds the shared function info for a source string for eval in a
// given context. Returns an empty handle if the cache doesn't
// contain a script for the given source string.
- InfoVectorPair LookupEval(Handle<String> source,
- Handle<SharedFunctionInfo> outer_info,
- Handle<Context> context, LanguageMode language_mode,
- int position);
+ InfoCellPair LookupEval(Handle<String> source,
+ Handle<SharedFunctionInfo> outer_info,
+ Handle<Context> context, LanguageMode language_mode,
+ int position);
// Returns the regexp data associated with the given regexp if it
// is in cache, otherwise an empty handle.
@@ -175,15 +177,14 @@ class CompilationCache {
// info. This may overwrite an existing mapping.
void PutScript(Handle<String> source, Handle<Context> context,
LanguageMode language_mode,
- Handle<SharedFunctionInfo> function_info,
- Handle<Cell> literals);
+ Handle<SharedFunctionInfo> function_info);
// Associate the (source, context->closure()->shared(), kind) triple
// with the shared function info. This may overwrite an existing mapping.
void PutEval(Handle<String> source, Handle<SharedFunctionInfo> outer_info,
Handle<Context> context,
- Handle<SharedFunctionInfo> function_info, Handle<Cell> literals,
- int position);
+ Handle<SharedFunctionInfo> function_info,
+ Handle<FeedbackCell> feedback_cell, int position);
// Associate the (source, flags) pair to the given regexp data.
// This may overwrite an existing mapping.
diff --git a/deps/v8/src/compilation-dependencies.h b/deps/v8/src/compilation-dependencies.h
index 990d536e38..fa26e67b1a 100644
--- a/deps/v8/src/compilation-dependencies.h
+++ b/deps/v8/src/compilation-dependencies.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_DEPENDENCIES_H_
-#define V8_DEPENDENCIES_H_
+#ifndef V8_COMPILATION_DEPENDENCIES_H_
+#define V8_COMPILATION_DEPENDENCIES_H_
#include "src/handles.h"
#include "src/objects.h"
@@ -71,4 +71,4 @@ class CompilationDependencies {
} // namespace internal
} // namespace v8
-#endif // V8_DEPENDENCIES_H_
+#endif // V8_COMPILATION_DEPENDENCIES_H_
diff --git a/deps/v8/src/compilation-info.cc b/deps/v8/src/compilation-info.cc
index 27e6dbb9da..85d887ceb7 100644
--- a/deps/v8/src/compilation-info.cc
+++ b/deps/v8/src/compilation-info.cc
@@ -16,12 +16,9 @@
namespace v8 {
namespace internal {
-// TODO(mvstanton): the Code::OPTIMIZED_FUNCTION constant below is
-// bogus, it's just that I've eliminated Code::FUNCTION and there isn't
-// a "better" value to put in this place.
CompilationInfo::CompilationInfo(Zone* zone, ParseInfo* parse_info,
FunctionLiteral* literal)
- : CompilationInfo({}, Code::OPTIMIZED_FUNCTION, BASE, zone) {
+ : CompilationInfo({}, AbstractCode::INTERPRETED_FUNCTION, zone) {
// NOTE: The parse_info passed here represents the global information gathered
// during parsing, but does not represent specific details of the actual
// function literal being compiled for this CompilationInfo. As such,
@@ -39,7 +36,7 @@ CompilationInfo::CompilationInfo(Zone* zone, ParseInfo* parse_info,
CompilationInfo::CompilationInfo(Zone* zone, Isolate* isolate,
Handle<SharedFunctionInfo> shared,
Handle<JSFunction> closure)
- : CompilationInfo({}, Code::OPTIMIZED_FUNCTION, OPTIMIZE, zone) {
+ : CompilationInfo({}, AbstractCode::OPTIMIZED_FUNCTION, zone) {
shared_info_ = shared;
closure_ = closure;
optimization_id_ = isolate->NextOptimizationId();
@@ -47,6 +44,8 @@ CompilationInfo::CompilationInfo(Zone* zone, Isolate* isolate,
if (FLAG_function_context_specialization) MarkAsFunctionContextSpecializing();
if (FLAG_turbo_splitting) MarkAsSplittingEnabled();
+ if (!FLAG_turbo_disable_switch_jump_table) SetFlag(kSwitchJumpTableEnabled);
+ if (FLAG_untrusted_code_mitigations) MarkAsPoisoningRegisterArguments();
// Collect source positions for optimized code when profiling or if debugger
// is active, to be able to get more precise source positions at the price of
@@ -58,24 +57,27 @@ CompilationInfo::CompilationInfo(Zone* zone, Isolate* isolate,
CompilationInfo::CompilationInfo(Vector<const char> debug_name, Zone* zone,
Code::Kind code_kind)
- : CompilationInfo(debug_name, code_kind, STUB, zone) {}
+ : CompilationInfo(debug_name, static_cast<AbstractCode::Kind>(code_kind),
+ zone) {
+ if (code_kind == Code::BYTECODE_HANDLER && has_untrusted_code_mitigations()) {
+ SetFlag(CompilationInfo::kGenerateSpeculationPoisonOnEntry);
+ }
+}
CompilationInfo::CompilationInfo(Vector<const char> debug_name,
- Code::Kind code_kind, Mode mode, Zone* zone)
+ AbstractCode::Kind code_kind, Zone* zone)
: literal_(nullptr),
source_range_map_(nullptr),
flags_(FLAG_untrusted_code_mitigations ? kUntrustedCodeMitigations : 0),
code_kind_(code_kind),
stub_key_(0),
builtin_index_(Builtins::kNoBuiltinId),
- mode_(mode),
osr_offset_(BailoutId::None()),
feedback_vector_spec_(zone),
zone_(zone),
deferred_handles_(nullptr),
dependencies_(nullptr),
bailout_reason_(BailoutReason::kNoReason),
- parameter_count_(0),
optimization_id_(-1),
debug_name_(debug_name) {}
@@ -94,15 +96,15 @@ DeclarationScope* CompilationInfo::scope() const {
}
int CompilationInfo::num_parameters() const {
- return !IsStub() ? scope()->num_parameters() : parameter_count_;
+ DCHECK(!IsStub());
+ return scope()->num_parameters();
}
int CompilationInfo::num_parameters_including_this() const {
- return num_parameters() + (is_this_defined() ? 1 : 0);
+ DCHECK(!IsStub());
+ return scope()->num_parameters() + 1;
}
-bool CompilationInfo::is_this_defined() const { return !IsStub(); }
-
void CompilationInfo::set_deferred_handles(
std::shared_ptr<DeferredHandles> deferred_handles) {
DCHECK_NULL(deferred_handles_);
diff --git a/deps/v8/src/compilation-info.h b/deps/v8/src/compilation-info.h
index bb5812002e..e68b6d88b4 100644
--- a/deps/v8/src/compilation-info.h
+++ b/deps/v8/src/compilation-info.h
@@ -45,12 +45,16 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
kAccessorInliningEnabled = 1 << 3,
kFunctionContextSpecializing = 1 << 4,
kInliningEnabled = 1 << 5,
- kDisableFutureOptimization = 1 << 6,
- kSplittingEnabled = 1 << 7,
- kSourcePositionsEnabled = 1 << 8,
- kBailoutOnUninitialized = 1 << 9,
- kLoopPeelingEnabled = 1 << 10,
- kUntrustedCodeMitigations = 1 << 11,
+ kPoisonLoads = 1 << 6,
+ kDisableFutureOptimization = 1 << 7,
+ kSplittingEnabled = 1 << 8,
+ kSourcePositionsEnabled = 1 << 9,
+ kBailoutOnUninitialized = 1 << 10,
+ kLoopPeelingEnabled = 1 << 11,
+ kUntrustedCodeMitigations = 1 << 12,
+ kSwitchJumpTableEnabled = 1 << 13,
+ kGenerateSpeculationPoisonOnEntry = 1 << 14,
+ kPoisonRegisterArguments = 1 << 15,
};
// TODO(mtrofin): investigate if this might be generalized outside wasm, with
@@ -60,9 +64,9 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
struct WasmCodeDesc {
CodeDesc code_desc;
size_t safepoint_table_offset = 0;
+ size_t handler_table_offset = 0;
uint32_t frame_slot_count = 0;
Handle<ByteArray> source_positions_table;
- MaybeHandle<HandlerTable> handler_table;
};
// Construct a compilation info for unoptimized compilation.
@@ -99,7 +103,11 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
bool has_shared_info() const { return !shared_info().is_null(); }
Handle<JSFunction> closure() const { return closure_; }
Handle<Code> code() const { return code_; }
- Code::Kind code_kind() const { return code_kind_; }
+ AbstractCode::Kind abstract_code_kind() const { return code_kind_; }
+ Code::Kind code_kind() const {
+ DCHECK(code_kind_ < static_cast<AbstractCode::Kind>(Code::NUMBER_OF_KINDS));
+ return static_cast<Code::Kind>(code_kind_);
+ }
uint32_t stub_key() const { return stub_key_; }
void set_stub_key(uint32_t stub_key) { stub_key_ = stub_key; }
int32_t builtin_index() const { return builtin_index_; }
@@ -108,12 +116,6 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
JavaScriptFrame* osr_frame() const { return osr_frame_; }
int num_parameters() const;
int num_parameters_including_this() const;
- bool is_this_defined() const;
-
- void set_parameter_count(int parameter_count) {
- DCHECK(IsStub());
- parameter_count_ = parameter_count;
- }
bool has_bytecode_array() const { return !bytecode_array_.is_null(); }
Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
@@ -154,6 +156,9 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
void MarkAsInliningEnabled() { SetFlag(kInliningEnabled); }
bool is_inlining_enabled() const { return GetFlag(kInliningEnabled); }
+ void MarkAsPoisonLoads() { SetFlag(kPoisonLoads); }
+ bool is_poison_loads() const { return GetFlag(kPoisonLoads); }
+
void MarkAsSplittingEnabled() { SetFlag(kSplittingEnabled); }
bool is_splitting_enabled() const { return GetFlag(kSplittingEnabled); }
@@ -169,6 +174,27 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
return GetFlag(kUntrustedCodeMitigations);
}
+ bool switch_jump_table_enabled() const {
+ return GetFlag(kSwitchJumpTableEnabled);
+ }
+
+ bool is_generating_speculation_poison_on_entry() const {
+ bool enabled = GetFlag(kGenerateSpeculationPoisonOnEntry);
+ DCHECK_IMPLIES(enabled, has_untrusted_code_mitigations());
+ return enabled;
+ }
+
+ void MarkAsPoisoningRegisterArguments() {
+ DCHECK(has_untrusted_code_mitigations());
+ SetFlag(kGenerateSpeculationPoisonOnEntry);
+ SetFlag(kPoisonRegisterArguments);
+ }
+ bool is_poisoning_register_arguments() const {
+ bool enabled = GetFlag(kPoisonRegisterArguments);
+ DCHECK_IMPLIES(enabled, has_untrusted_code_mitigations());
+ return enabled;
+ }
+
// Code getters and setters.
void SetCode(Handle<Code> code) { code_ = code; }
@@ -193,9 +219,17 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
JSGlobalObject* global_object() const;
// Accessors for the different compilation modes.
- bool IsOptimizing() const { return mode_ == OPTIMIZE; }
- bool IsStub() const { return mode_ == STUB; }
- bool IsWasm() const { return code_kind() == Code::WASM_FUNCTION; }
+ bool IsOptimizing() const {
+ return abstract_code_kind() == AbstractCode::OPTIMIZED_FUNCTION;
+ }
+ bool IsWasm() const {
+ return abstract_code_kind() == AbstractCode::WASM_FUNCTION;
+ }
+ bool IsStub() const {
+ return abstract_code_kind() != AbstractCode::OPTIMIZED_FUNCTION &&
+ abstract_code_kind() != AbstractCode::WASM_FUNCTION &&
+ abstract_code_kind() != AbstractCode::INTERPRETED_FUNCTION;
+ }
void SetOptimizingForOsr(BailoutId osr_offset, JavaScriptFrame* osr_frame) {
DCHECK(IsOptimizing());
osr_offset_ = osr_offset;
@@ -275,15 +309,8 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
WasmCodeDesc* wasm_code_desc() { return &wasm_code_desc_; }
private:
- // Compilation mode.
- // BASE is generated by the full codegen, optionally prepared for bailouts.
- // OPTIMIZE is optimized code generated by the Hydrogen-based backend.
- enum Mode { BASE, OPTIMIZE, STUB };
-
- CompilationInfo(Vector<const char> debug_name, Code::Kind code_kind,
- Mode mode, Zone* zone);
-
- void SetMode(Mode mode) { mode_ = mode; }
+ CompilationInfo(Vector<const char> debug_name, AbstractCode::Kind code_kind,
+ Zone* zone);
void SetFlag(Flag flag) { flags_ |= flag; }
@@ -298,7 +325,7 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
unsigned flags_;
- Code::Kind code_kind_;
+ AbstractCode::Kind code_kind_;
uint32_t stub_key_;
int32_t builtin_index_;
@@ -310,8 +337,7 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
Handle<Code> code_;
WasmCodeDesc wasm_code_desc_;
- // Compilation mode flag and whether deoptimization is allowed.
- Mode mode_;
+ // Entry point when compiling for OSR, {BailoutId::None} otherwise.
BailoutId osr_offset_;
// Holds the bytecode array generated by the interpreter.
@@ -338,9 +364,6 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
InlinedFunctionList inlined_functions_;
- // Number of parameters used for compilation of stubs that require arguments.
- int parameter_count_;
-
int optimization_id_;
// The current OSR frame for specialization or {nullptr}.
diff --git a/deps/v8/src/compilation-statistics.h b/deps/v8/src/compilation-statistics.h
index 388117b10e..1f70336fcc 100644
--- a/deps/v8/src/compilation-statistics.h
+++ b/deps/v8/src/compilation-statistics.h
@@ -90,4 +90,4 @@ std::ostream& operator<<(std::ostream& os, const AsPrintableStatistics& s);
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_COMPILATION_STATISTICS_H_
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
index 1adfd090cd..27af96c85e 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
@@ -92,7 +92,7 @@ MemoryPressureTask::MemoryPressureTask(Isolate* isolate,
MemoryPressureTask::~MemoryPressureTask() {}
void MemoryPressureTask::RunInternal() {
- dispatcher_->AbortAll(CompilerDispatcher::BlockingBehavior::kDontBlock);
+ dispatcher_->AbortAll(BlockingBehavior::kDontBlock);
}
} // namespace
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
index ee20e8d02e..240f025c1e 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
@@ -72,8 +72,6 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
public:
typedef uintptr_t JobId;
- enum class BlockingBehavior { kBlock, kDontBlock };
-
CompilerDispatcher(Isolate* isolate, Platform* platform,
size_t max_stack_size);
~CompilerDispatcher();
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index 59872b2535..7dc73b146c 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -25,10 +25,10 @@ void DisposeCompilationJob(CompilationJob* job, bool restore_function_code) {
if (function->IsInOptimizationQueue()) {
function->ClearOptimizationMarker();
}
- // TODO(mvstanton): We can't call EnsureLiterals here due to allocation,
- // but we probably shouldn't call set_code either, as this
+ // TODO(mvstanton): We can't call EnsureFeedbackVector here due to
+ // allocation, but we probably shouldn't call set_code either, as this
// sometimes runs on the worker thread!
- // JSFunction::EnsureLiterals(function);
+ // JSFunction::EnsureFeedbackVector(function);
}
delete job;
}
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
index d1d295f063..551b7c3563 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
@@ -23,8 +23,6 @@ class SharedFunctionInfo;
class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
public:
- enum class BlockingBehavior { kBlock, kDontBlock };
-
explicit OptimizingCompileDispatcher(Isolate* isolate)
: isolate_(isolate),
input_queue_capacity_(FLAG_concurrent_recompilation_queue_length),
diff --git a/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc b/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc
index 74b2352bd8..23a607a093 100644
--- a/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc
+++ b/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc
@@ -251,7 +251,7 @@ void UnoptimizedCompileJob::Compile(bool on_background_thread) {
}
compilation_job_.reset(interpreter::Interpreter::NewCompilationJob(
- parse_info_.get(), parse_info_->literal(), allocator_));
+ parse_info_.get(), parse_info_->literal(), allocator_, nullptr));
if (!compilation_job_.get()) {
parse_info_->pending_error_handler()->set_stack_overflow();
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index e2f8ee0f39..9b25832668 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -10,7 +10,6 @@
#include "src/api.h"
#include "src/asmjs/asm-js.h"
#include "src/assembler-inl.h"
-#include "src/ast/ast-numbering.h"
#include "src/ast/prettyprinter.h"
#include "src/ast/scopes.h"
#include "src/base/optional.h"
@@ -31,11 +30,13 @@
#include "src/messages.h"
#include "src/objects/map.h"
#include "src/parsing/parse-info.h"
+#include "src/parsing/parser.h"
#include "src/parsing/parsing.h"
#include "src/parsing/rewriter.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/runtime-profiler.h"
#include "src/snapshot/code-serializer.h"
+#include "src/unicode-cache.h"
#include "src/vm-state-inl.h"
namespace v8 {
@@ -370,20 +371,9 @@ CompilationJob::Status FinalizeUnoptimizedCompilationJob(CompilationJob* job,
return status;
}
-bool Renumber(ParseInfo* parse_info,
- Compiler::EagerInnerFunctionLiterals* eager_literals) {
- RuntimeCallTimerScope runtimeTimer(
- parse_info->runtime_call_stats(),
- parse_info->on_background_thread()
- ? RuntimeCallCounterId::kCompileBackgroundRenumber
- : RuntimeCallCounterId::kCompileRenumber);
- return AstNumbering::Renumber(parse_info->stack_limit(), parse_info->zone(),
- parse_info->literal(), eager_literals);
-}
-
-std::unique_ptr<CompilationJob> PrepareAndExecuteUnoptimizedCompileJob(
+std::unique_ptr<CompilationJob> PrepareAndExecuteUnoptimizedCompileJobs(
ParseInfo* parse_info, FunctionLiteral* literal,
- AccountingAllocator* allocator) {
+ AccountingAllocator* allocator, CompilationJobList* inner_function_jobs) {
if (UseAsmWasm(literal, parse_info->is_asm_wasm_broken())) {
std::unique_ptr<CompilationJob> asm_job(
AsmJs::NewCompilationJob(parse_info, literal, allocator));
@@ -396,14 +386,27 @@ std::unique_ptr<CompilationJob> PrepareAndExecuteUnoptimizedCompileJob(
    // with a validation error or another error that could be solved by falling
    // through to standard unoptimized compile.
}
+ ZoneVector<FunctionLiteral*> eager_inner_literals(0, parse_info->zone());
std::unique_ptr<CompilationJob> job(
- interpreter::Interpreter::NewCompilationJob(parse_info, literal,
- allocator));
+ interpreter::Interpreter::NewCompilationJob(
+ parse_info, literal, allocator, &eager_inner_literals));
- if (job->ExecuteJob() == CompilationJob::SUCCEEDED) {
- return job;
+ if (job->ExecuteJob() != CompilationJob::SUCCEEDED) {
+ // Compilation failed, return null.
+ return std::unique_ptr<CompilationJob>();
}
- return std::unique_ptr<CompilationJob>(); // Compilation failed, return null.
+
+ // Recursively compile eager inner literals.
+ for (FunctionLiteral* inner_literal : eager_inner_literals) {
+ std::unique_ptr<CompilationJob> inner_job(
+ PrepareAndExecuteUnoptimizedCompileJobs(
+ parse_info, inner_literal, allocator, inner_function_jobs));
+ // Compilation failed, return null.
+ if (!inner_job) return std::unique_ptr<CompilationJob>();
+ inner_function_jobs->emplace_front(std::move(inner_job));
+ }
+
+ return job;
}
std::unique_ptr<CompilationJob> GenerateUnoptimizedCode(
@@ -414,27 +417,16 @@ std::unique_ptr<CompilationJob> GenerateUnoptimizedCode(
DisallowHandleDereference no_deref;
DCHECK(inner_function_jobs->empty());
- Compiler::EagerInnerFunctionLiterals inner_literals;
- if (!Compiler::Analyze(parse_info, &inner_literals)) {
+ if (!Compiler::Analyze(parse_info)) {
return std::unique_ptr<CompilationJob>();
}
// Prepare and execute compilation of the outer-most function.
std::unique_ptr<CompilationJob> outer_function_job(
- PrepareAndExecuteUnoptimizedCompileJob(parse_info, parse_info->literal(),
- allocator));
+ PrepareAndExecuteUnoptimizedCompileJobs(parse_info, parse_info->literal(),
+ allocator, inner_function_jobs));
if (!outer_function_job) return std::unique_ptr<CompilationJob>();
- // Prepare and execute compilation jobs for eager inner functions.
- for (auto it : inner_literals) {
- FunctionLiteral* inner_literal = it->value();
- std::unique_ptr<CompilationJob> inner_job(
- PrepareAndExecuteUnoptimizedCompileJob(parse_info, inner_literal,
- allocator));
- if (!inner_job) return std::unique_ptr<CompilationJob>();
- inner_function_jobs->emplace_front(std::move(inner_job));
- }
-
// Character stream shouldn't be used again.
parse_info->ResetCharacterStream();
@@ -491,7 +483,7 @@ MUST_USE_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
Handle<SharedFunctionInfo> shared(function->shared());
DisallowHeapAllocation no_gc;
if (osr_offset.IsNone()) {
- if (function->feedback_vector_cell()->value()->IsFeedbackVector()) {
+ if (function->feedback_cell()->value()->IsFeedbackVector()) {
FeedbackVector* feedback_vector = function->feedback_vector();
feedback_vector->EvictOptimizedCodeMarkedForDeoptimization(
function->shared(), "GetCodeFromOptimizedCodeCache");
@@ -619,6 +611,11 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
function->ClearOptimizationMarker();
}
+ if (isolate->debug()->needs_check_on_function_call()) {
+ // Do not optimize when debugger needs to hook into every call.
+ return MaybeHandle<Code>();
+ }
+
Handle<Code> cached_code;
if (GetCodeFromOptimizedCodeCache(function, osr_offset)
.ToHandle(&cached_code)) {
@@ -771,6 +768,21 @@ CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job,
return CompilationJob::FAILED;
}
+bool FailWithPendingException(Isolate* isolate, ParseInfo* parse_info,
+ Compiler::ClearExceptionFlag flag) {
+ if (flag == Compiler::CLEAR_EXCEPTION) {
+ isolate->clear_pending_exception();
+ } else if (!isolate->has_pending_exception()) {
+ if (parse_info->pending_error_handler()->has_pending_error()) {
+ parse_info->pending_error_handler()->ReportErrors(
+ isolate, parse_info->script(), parse_info->ast_value_factory());
+ } else {
+ isolate->StackOverflow();
+ }
+ }
+ return false;
+}
+
MaybeHandle<SharedFunctionInfo> FinalizeTopLevel(
ParseInfo* parse_info, Isolate* isolate, CompilationJob* outer_function_job,
CompilationJobList* inner_function_jobs) {
@@ -792,7 +804,8 @@ MaybeHandle<SharedFunctionInfo> FinalizeTopLevel(
// Finalize compilation of the unoptimized bytecode or asm-js data.
if (!FinalizeUnoptimizedCode(parse_info, isolate, shared_info,
outer_function_job, inner_function_jobs)) {
- if (!isolate->has_pending_exception()) isolate->StackOverflow();
+ FailWithPendingException(isolate, parse_info,
+ Compiler::ClearExceptionFlag::KEEP_EXCEPTION);
return MaybeHandle<SharedFunctionInfo>();
}
@@ -834,7 +847,8 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(ParseInfo* parse_info,
std::unique_ptr<CompilationJob> outer_function_job(GenerateUnoptimizedCode(
parse_info, isolate->allocator(), &inner_function_jobs));
if (!outer_function_job) {
- if (!isolate->has_pending_exception()) isolate->StackOverflow();
+ FailWithPendingException(isolate, parse_info,
+ Compiler::ClearExceptionFlag::KEEP_EXCEPTION);
return MaybeHandle<SharedFunctionInfo>();
}
@@ -842,14 +856,124 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(ParseInfo* parse_info,
&inner_function_jobs);
}
-bool FailWithPendingException(Isolate* isolate,
- Compiler::ClearExceptionFlag flag) {
- if (flag == Compiler::CLEAR_EXCEPTION) {
- isolate->clear_pending_exception();
- } else if (!isolate->has_pending_exception()) {
- isolate->StackOverflow();
+std::unique_ptr<CompilationJob> CompileTopLevelOnBackgroundThread(
+ ParseInfo* parse_info, AccountingAllocator* allocator,
+ CompilationJobList* inner_function_jobs) {
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.CompileCodeBackground");
+ RuntimeCallTimerScope runtimeTimer(
+ parse_info->runtime_call_stats(),
+ parse_info->is_eval() ? RuntimeCallCounterId::kCompileBackgroundEval
+ : RuntimeCallCounterId::kCompileBackgroundScript);
+
+ LanguageMode language_mode = construct_language_mode(FLAG_use_strict);
+ parse_info->set_language_mode(
+ stricter_language_mode(parse_info->language_mode(), language_mode));
+
+ // Can't access scope info data off-main-thread.
+ DCHECK(!parse_info->consumed_preparsed_scope_data()->HasData());
+
+ // Generate the unoptimized bytecode or asm-js data.
+ std::unique_ptr<CompilationJob> outer_function_job(
+ GenerateUnoptimizedCode(parse_info, allocator, inner_function_jobs));
+ return outer_function_job;
+}
+
+class BackgroundCompileTask : public ScriptCompiler::ScriptStreamingTask {
+ public:
+ BackgroundCompileTask(ScriptStreamingData* source, Isolate* isolate);
+
+ virtual void Run();
+
+ private:
+ ScriptStreamingData* source_; // Not owned.
+ int stack_size_;
+ ScriptData* script_data_;
+ AccountingAllocator* allocator_;
+ TimedHistogram* timer_;
+
+ DISALLOW_COPY_AND_ASSIGN(BackgroundCompileTask);
+};
+
+BackgroundCompileTask::BackgroundCompileTask(ScriptStreamingData* source,
+ Isolate* isolate)
+ : source_(source),
+ stack_size_(i::FLAG_stack_size),
+ script_data_(nullptr),
+ timer_(isolate->counters()->compile_script_on_background()) {
+ VMState<PARSER> state(isolate);
+
+ // Prepare the data for the internalization phase and compilation phase, which
+  // will happen on the main thread after parsing.
+ ParseInfo* info = new ParseInfo(isolate->allocator());
+ info->InitFromIsolate(isolate);
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
+ info->set_runtime_call_stats(new (info->zone()) RuntimeCallStats());
+ } else {
+ info->set_runtime_call_stats(nullptr);
+ }
+ info->set_toplevel();
+ std::unique_ptr<Utf16CharacterStream> stream(
+ ScannerStream::For(source->source_stream.get(), source->encoding,
+ info->runtime_call_stats()));
+ info->set_character_stream(std::move(stream));
+ info->set_unicode_cache(&source_->unicode_cache);
+ info->set_allow_lazy_parsing();
+ if (V8_UNLIKELY(info->block_coverage_enabled())) {
+ info->AllocateSourceRangeMap();
}
- return false;
+ LanguageMode language_mode = construct_language_mode(FLAG_use_strict);
+ info->set_language_mode(
+ stricter_language_mode(info->language_mode(), language_mode));
+
+ source->info.reset(info);
+ allocator_ = isolate->allocator();
+
+ // Parser needs to stay alive for finalizing the parsing on the main
+ // thread.
+ source_->parser.reset(new Parser(source_->info.get()));
+ source_->parser->DeserializeScopeChain(source_->info.get(),
+ MaybeHandle<ScopeInfo>());
+}
+
+void BackgroundCompileTask::Run() {
+ TimedHistogramScope timer(timer_);
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
+
+ source_->info->set_on_background_thread(true);
+
+ // Reset the stack limit of the parser to reflect correctly that we're on a
+ // background thread.
+ uintptr_t old_stack_limit = source_->info->stack_limit();
+ uintptr_t stack_limit = GetCurrentStackPosition() - stack_size_ * KB;
+ source_->info->set_stack_limit(stack_limit);
+ source_->parser->set_stack_limit(stack_limit);
+
+ source_->parser->ParseOnBackground(source_->info.get());
+ if (FLAG_background_compile && source_->info->literal() != nullptr) {
+ // Parsing has succeeded, compile.
+ source_->outer_function_job = CompileTopLevelOnBackgroundThread(
+ source_->info.get(), allocator_, &source_->inner_function_jobs);
+ }
+
+ if (script_data_ != nullptr) {
+ source_->cached_data.reset(new ScriptCompiler::CachedData(
+ script_data_->data(), script_data_->length(),
+ ScriptCompiler::CachedData::BufferOwned));
+ script_data_->ReleaseDataOwnership();
+ delete script_data_;
+ script_data_ = nullptr;
+ }
+
+ source_->info->EmitBackgroundParseStatisticsOnBackgroundThread();
+
+ source_->info->set_on_background_thread(false);
+ source_->info->set_stack_limit(old_stack_limit);
}
} // namespace
@@ -857,8 +981,7 @@ bool FailWithPendingException(Isolate* isolate,
// ----------------------------------------------------------------------------
// Implementation of Compiler
-bool Compiler::Analyze(ParseInfo* parse_info,
- EagerInnerFunctionLiterals* eager_literals) {
+bool Compiler::Analyze(ParseInfo* parse_info) {
DCHECK_NOT_NULL(parse_info->literal());
RuntimeCallTimerScope runtimeTimer(
parse_info->runtime_call_stats(),
@@ -866,8 +989,7 @@ bool Compiler::Analyze(ParseInfo* parse_info,
? RuntimeCallCounterId::kCompileBackgroundAnalyse
: RuntimeCallCounterId::kCompileAnalyse);
if (!Rewriter::Rewrite(parse_info)) return false;
- DeclarationScope::Analyze(parse_info);
- if (!Renumber(parse_info, eager_literals)) return false;
+ if (!DeclarationScope::Analyze(parse_info)) return false;
return true;
}
@@ -897,18 +1019,19 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileCode");
AggregatedHistogramTimerScope timer(isolate->counters()->compile_lazy());
+ // Set up parse info.
+ ParseInfo parse_info(shared_info);
+ parse_info.set_lazy_compile();
+
// Check if the compiler dispatcher has shared_info enqueued for compile.
CompilerDispatcher* dispatcher = isolate->compiler_dispatcher();
if (dispatcher->IsEnqueued(shared_info)) {
if (!dispatcher->FinishNow(shared_info)) {
- return FailWithPendingException(isolate, flag);
+ return FailWithPendingException(isolate, &parse_info, flag);
}
return true;
}
- // Set up parse info.
- ParseInfo parse_info(shared_info);
- parse_info.set_lazy_compile();
if (FLAG_preparser_scope_analysis) {
if (shared_info->HasPreParsedScopeData()) {
Handle<PreParsedScopeData> data(
@@ -922,7 +1045,7 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
// Parse and update ParseInfo with the results.
if (!parsing::ParseFunction(&parse_info, shared_info, isolate)) {
- return FailWithPendingException(isolate, flag);
+ return FailWithPendingException(isolate, &parse_info, flag);
}
// Generate the unoptimized bytecode or asm-js data.
@@ -930,7 +1053,7 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
std::unique_ptr<CompilationJob> outer_function_job(GenerateUnoptimizedCode(
&parse_info, isolate->allocator(), &inner_function_jobs));
if (!outer_function_job) {
- return FailWithPendingException(isolate, flag);
+ return FailWithPendingException(isolate, &parse_info, flag);
}
// Internalize ast values onto the heap.
@@ -940,7 +1063,7 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
if (!FinalizeUnoptimizedCode(&parse_info, isolate, shared_info,
outer_function_job.get(),
&inner_function_jobs)) {
- return FailWithPendingException(isolate, flag);
+ return FailWithPendingException(isolate, &parse_info, flag);
}
DCHECK(!isolate->has_pending_exception());
@@ -962,8 +1085,8 @@ bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag) {
if (!shared_info->is_compiled() && !Compile(shared_info, flag)) return false;
Handle<Code> code = handle(shared_info->code(), isolate);
- // Allocate literals for the JSFunction.
- JSFunction::EnsureLiterals(function);
+ // Allocate FeedbackVector for the JSFunction.
+ JSFunction::EnsureFeedbackVector(function);
// Optimize now if --always-opt is enabled.
if (FLAG_always_opt && !function->shared()->HasAsmWasmData()) {
@@ -1075,27 +1198,28 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
// is unused (just 0), which means it's an available field to use to indicate
// this separation. But to make sure we're not causing other false hits, we
// negate the scope position.
- int position = eval_scope_position;
if (FLAG_harmony_function_tostring &&
restriction == ONLY_SINGLE_FUNCTION_LITERAL &&
parameters_end_pos != kNoSourcePosition) {
// use the parameters_end_pos as the eval_scope_position in the eval cache.
DCHECK_EQ(eval_scope_position, 0);
- position = -parameters_end_pos;
+ eval_scope_position = -parameters_end_pos;
}
CompilationCache* compilation_cache = isolate->compilation_cache();
- InfoVectorPair eval_result = compilation_cache->LookupEval(
- source, outer_info, context, language_mode, position);
- Handle<Cell> vector;
- if (eval_result.has_vector()) {
- vector = Handle<Cell>(eval_result.vector(), isolate);
+ InfoCellPair eval_result = compilation_cache->LookupEval(
+ source, outer_info, context, language_mode, eval_scope_position);
+ Handle<FeedbackCell> feedback_cell;
+ if (eval_result.has_feedback_cell()) {
+ feedback_cell = handle(eval_result.feedback_cell(), isolate);
}
Handle<SharedFunctionInfo> shared_info;
Handle<Script> script;
+ bool allow_eval_cache;
if (eval_result.has_shared()) {
shared_info = Handle<SharedFunctionInfo>(eval_result.shared(), isolate);
script = Handle<Script>(Script::cast(shared_info->script()), isolate);
+ allow_eval_cache = true;
} else {
script = isolate->factory()->NewScript(source);
if (isolate->NeedsSourcePositionsForProfiling()) {
@@ -1139,6 +1263,7 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
if (!CompileToplevel(&parse_info, isolate).ToHandle(&shared_info)) {
return MaybeHandle<JSFunction>();
}
+ allow_eval_cache = parse_info.allow_eval_cache();
}
// If caller is strict mode, the result must be in strict mode as well.
@@ -1146,27 +1271,32 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
Handle<JSFunction> result;
if (eval_result.has_shared()) {
- if (eval_result.has_vector()) {
+ if (eval_result.has_feedback_cell()) {
result = isolate->factory()->NewFunctionFromSharedFunctionInfo(
- shared_info, context, vector, NOT_TENURED);
+ shared_info, context, feedback_cell, NOT_TENURED);
} else {
result = isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared_info, context, NOT_TENURED);
- JSFunction::EnsureLiterals(result);
- // Make sure to cache this result.
- Handle<Cell> new_vector(result->feedback_vector_cell(), isolate);
- compilation_cache->PutEval(source, outer_info, context, shared_info,
- new_vector, eval_scope_position);
+ JSFunction::EnsureFeedbackVector(result);
+ if (allow_eval_cache) {
+ // Make sure to cache this result.
+ Handle<FeedbackCell> new_feedback_cell(result->feedback_cell(),
+ isolate);
+ compilation_cache->PutEval(source, outer_info, context, shared_info,
+ new_feedback_cell, eval_scope_position);
+ }
}
} else {
result = isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared_info, context, NOT_TENURED);
- JSFunction::EnsureLiterals(result);
- // Add the SharedFunctionInfo and the LiteralsArray to the eval cache if
- // we didn't retrieve from there.
- Handle<Cell> vector(result->feedback_vector_cell(), isolate);
- compilation_cache->PutEval(source, outer_info, context, shared_info, vector,
- eval_scope_position);
+ JSFunction::EnsureFeedbackVector(result);
+ if (allow_eval_cache) {
+      // Add the SharedFunctionInfo and the FeedbackCell to the eval cache if
+ // we didn't retrieve from there.
+ Handle<FeedbackCell> new_feedback_cell(result->feedback_cell(), isolate);
+ compilation_cache->PutEval(source, outer_info, context, shared_info,
+ new_feedback_cell, eval_scope_position);
+ }
}
// OnAfterCompile has to be called after we create the JSFunction, which we
@@ -1228,15 +1358,6 @@ MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
return Handle<JSFunction>::cast(result);
}
-namespace {
-
-bool ShouldProduceCodeCache(ScriptCompiler::CompileOptions options) {
- return options == ScriptCompiler::kProduceCodeCache ||
- options == ScriptCompiler::kProduceFullCodeCache;
-}
-
-} // namespace
-
bool Compiler::CodeGenerationFromStringsAllowed(Isolate* isolate,
Handle<Context> context,
Handle<String> source) {
@@ -1477,30 +1598,55 @@ struct ScriptCompileTimerScope {
}
};
+Handle<Script> NewScript(Isolate* isolate, Handle<String> source,
+ Compiler::ScriptDetails script_details,
+ ScriptOriginOptions origin_options,
+ NativesFlag natives) {
+ // Create a script object describing the script to be compiled.
+ Handle<Script> script = isolate->factory()->NewScript(source);
+ if (isolate->NeedsSourcePositionsForProfiling()) {
+ Script::InitLineEnds(script);
+ }
+ if (natives == NATIVES_CODE) {
+ script->set_type(Script::TYPE_NATIVE);
+ } else if (natives == EXTENSION_CODE) {
+ script->set_type(Script::TYPE_EXTENSION);
+ } else if (natives == INSPECTOR_CODE) {
+ script->set_type(Script::TYPE_INSPECTOR);
+ }
+ Handle<Object> script_name;
+ if (script_details.name_obj.ToHandle(&script_name)) {
+ script->set_name(*script_name);
+ script->set_line_offset(script_details.line_offset);
+ script->set_column_offset(script_details.column_offset);
+ }
+ script->set_origin_options(origin_options);
+ Handle<Object> source_map_url;
+ if (script_details.source_map_url.ToHandle(&source_map_url)) {
+ script->set_source_mapping_url(*source_map_url);
+ }
+ Handle<FixedArray> host_defined_options;
+ if (script_details.host_defined_options.ToHandle(&host_defined_options)) {
+ script->set_host_defined_options(*host_defined_options);
+ }
+ return script;
+}
+
} // namespace
MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
- Handle<String> source, MaybeHandle<Object> maybe_script_name,
- int line_offset, int column_offset, ScriptOriginOptions resource_options,
- MaybeHandle<Object> maybe_source_map_url, Handle<Context> context,
- v8::Extension* extension, ScriptData** cached_data,
- ScriptCompiler::CompileOptions compile_options,
- ScriptCompiler::NoCacheReason no_cache_reason, NativesFlag natives,
- MaybeHandle<FixedArray> maybe_host_defined_options) {
+ Handle<String> source, const Compiler::ScriptDetails& script_details,
+ ScriptOriginOptions origin_options, v8::Extension* extension,
+ ScriptData** cached_data, ScriptCompiler::CompileOptions compile_options,
+ ScriptCompiler::NoCacheReason no_cache_reason, NativesFlag natives) {
Isolate* isolate = source->GetIsolate();
ScriptCompileTimerScope compile_timer(isolate, no_cache_reason);
if (compile_options == ScriptCompiler::kNoCompileOptions ||
compile_options == ScriptCompiler::kEagerCompile) {
cached_data = nullptr;
- } else if (compile_options == ScriptCompiler::kProduceParserCache ||
- ShouldProduceCodeCache(compile_options)) {
- DCHECK(cached_data && !*cached_data);
- DCHECK_NULL(extension);
- DCHECK(!isolate->debug()->is_loaded());
} else {
- DCHECK(compile_options == ScriptCompiler::kConsumeParserCache ||
- compile_options == ScriptCompiler::kConsumeCodeCache);
+ DCHECK(compile_options == ScriptCompiler::kConsumeCodeCache);
DCHECK(cached_data && *cached_data);
DCHECK_NULL(extension);
}
@@ -1513,7 +1659,6 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
// Do a lookup in the compilation cache but not for extensions.
MaybeHandle<SharedFunctionInfo> maybe_result;
- Handle<Cell> vector;
if (extension == nullptr) {
bool can_consume_code_cache =
compile_options == ScriptCompiler::kConsumeCodeCache &&
@@ -1523,10 +1668,13 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
}
// First check per-isolate compilation cache.
- InfoVectorPair pair = compilation_cache->LookupScript(
- source, maybe_script_name, line_offset, column_offset, resource_options,
- context, language_mode);
- if (can_consume_code_cache && !pair.has_shared()) {
+ maybe_result = compilation_cache->LookupScript(
+ source, script_details.name_obj, script_details.line_offset,
+ script_details.column_offset, origin_options, isolate->native_context(),
+ language_mode);
+ if (!maybe_result.is_null()) {
+ compile_timer.set_hit_isolate_cache();
+ } else if (can_consume_code_cache) {
compile_timer.set_consuming_code_cache();
// Then check cached code provided by embedder.
HistogramTimerScope timer(isolate->counters()->compile_deserialize());
@@ -1539,196 +1687,137 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
.ToHandle(&inner_result)) {
// Promote to per-isolate compilation cache.
DCHECK(inner_result->is_compiled());
- Handle<FeedbackVector> feedback_vector =
- FeedbackVector::New(isolate, inner_result);
- vector = isolate->factory()->NewCell(feedback_vector);
- compilation_cache->PutScript(source, context, language_mode,
- inner_result, vector);
+ compilation_cache->PutScript(source, isolate->native_context(),
+ language_mode, inner_result);
Handle<Script> script(Script::cast(inner_result->script()), isolate);
- isolate->debug()->OnAfterCompile(script);
if (isolate->NeedsSourcePositionsForProfiling()) {
Script::InitLineEnds(script);
}
- return inner_result;
- }
- // Deserializer failed. Fall through to compile.
- compile_timer.set_consuming_code_cache_failed();
- } else {
- if (pair.has_shared()) {
- maybe_result = MaybeHandle<SharedFunctionInfo>(pair.shared(), isolate);
- compile_timer.set_hit_isolate_cache();
- }
- if (pair.has_vector()) {
- vector = Handle<Cell>(pair.vector(), isolate);
+ maybe_result = inner_result;
+ } else {
+ // Deserializer failed. Fall through to compile.
+ compile_timer.set_consuming_code_cache_failed();
}
}
}
- base::ElapsedTimer timer;
- if (FLAG_profile_deserialization && ShouldProduceCodeCache(compile_options)) {
- timer.Start();
- }
+ if (maybe_result.is_null()) {
+ // No cache entry found; compile the script.
+ Handle<Script> script =
+ NewScript(isolate, source, script_details, origin_options, natives);
- if (maybe_result.is_null() || ShouldProduceCodeCache(compile_options)) {
- // No cache entry found, or embedder wants a code cache. Compile the script.
-
- // Create a script object describing the script to be compiled.
- Handle<Script> script = isolate->factory()->NewScript(source);
- if (isolate->NeedsSourcePositionsForProfiling()) {
- Script::InitLineEnds(script);
- }
- if (natives == NATIVES_CODE) {
- script->set_type(Script::TYPE_NATIVE);
- } else if (natives == EXTENSION_CODE) {
- script->set_type(Script::TYPE_EXTENSION);
- } else if (natives == INSPECTOR_CODE) {
- script->set_type(Script::TYPE_INSPECTOR);
- }
- Handle<Object> script_name;
- if (maybe_script_name.ToHandle(&script_name)) {
- script->set_name(*script_name);
- script->set_line_offset(line_offset);
- script->set_column_offset(column_offset);
- }
- script->set_origin_options(resource_options);
- Handle<Object> source_map_url;
- if (maybe_source_map_url.ToHandle(&source_map_url)) {
- script->set_source_mapping_url(*source_map_url);
- }
- Handle<FixedArray> host_defined_options;
- if (maybe_host_defined_options.ToHandle(&host_defined_options)) {
- script->set_host_defined_options(*host_defined_options);
- }
-
- // Compile the function and add it to the cache.
+ // Compile the function and add it to the isolate cache.
ParseInfo parse_info(script);
Zone compile_zone(isolate->allocator(), ZONE_NAME);
- if (resource_options.IsModule()) parse_info.set_module();
- if (compile_options != ScriptCompiler::kNoCompileOptions) {
- parse_info.set_cached_data(cached_data);
- }
- parse_info.set_compile_options(compile_options);
+ if (origin_options.IsModule()) parse_info.set_module();
parse_info.set_extension(extension);
- if (!context->IsNativeContext()) {
- parse_info.set_outer_scope_info(handle(context->scope_info()));
- }
- parse_info.set_eager(
- (compile_options == ScriptCompiler::kProduceFullCodeCache) ||
- (compile_options == ScriptCompiler::kEagerCompile));
+ parse_info.set_eager(compile_options == ScriptCompiler::kEagerCompile);
parse_info.set_language_mode(
stricter_language_mode(parse_info.language_mode(), language_mode));
maybe_result = CompileToplevel(&parse_info, isolate);
Handle<SharedFunctionInfo> result;
if (extension == nullptr && maybe_result.ToHandle(&result)) {
- // We need a feedback vector.
DCHECK(result->is_compiled());
- Handle<FeedbackVector> feedback_vector =
- FeedbackVector::New(isolate, result);
- vector = isolate->factory()->NewCell(feedback_vector);
- compilation_cache->PutScript(source, context, language_mode, result,
- vector);
- if (ShouldProduceCodeCache(compile_options) &&
- !script->ContainsAsmModule()) {
- compile_timer.set_producing_code_cache();
-
- HistogramTimerScope histogram_timer(
- isolate->counters()->compile_serialize());
- RuntimeCallTimerScope runtimeTimer(
- isolate, RuntimeCallCounterId::kCompileSerialize);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.CompileSerialize");
- *cached_data = CodeSerializer::Serialize(isolate, result, source);
- if (FLAG_profile_deserialization) {
- PrintF("[Compiling and serializing took %0.3f ms]\n",
- timer.Elapsed().InMillisecondsF());
- }
- }
+ compilation_cache->PutScript(source, isolate->native_context(),
+ language_mode, result);
+ } else if (maybe_result.is_null() && natives != EXTENSION_CODE &&
+ natives != NATIVES_CODE) {
+ isolate->ReportPendingMessages();
}
+ }
- if (maybe_result.is_null()) {
- if (natives != EXTENSION_CODE && natives != NATIVES_CODE) {
- isolate->ReportPendingMessages();
- }
- } else {
- isolate->debug()->OnAfterCompile(script);
- }
+ // On success, report script compilation to debugger.
+ Handle<SharedFunctionInfo> result;
+ if (maybe_result.ToHandle(&result)) {
+ isolate->debug()->OnAfterCompile(handle(Script::cast(result->script())));
}
+
return maybe_result;
}
-std::unique_ptr<CompilationJob> Compiler::CompileTopLevelOnBackgroundThread(
- ParseInfo* parse_info, AccountingAllocator* allocator,
- CompilationJobList* inner_function_jobs) {
- DisallowHeapAllocation no_allocation;
- DisallowHandleAllocation no_handles;
- DisallowHandleDereference no_deref;
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.CompileCodeBackground");
- RuntimeCallTimerScope runtimeTimer(
- parse_info->runtime_call_stats(),
- parse_info->is_eval() ? RuntimeCallCounterId::kCompileBackgroundEval
- : RuntimeCallCounterId::kCompileBackgroundScript);
-
- LanguageMode language_mode = construct_language_mode(FLAG_use_strict);
- parse_info->set_language_mode(
- stricter_language_mode(parse_info->language_mode(), language_mode));
-
- // Can't access scope info data off-main-thread.
- DCHECK(!parse_info->consumed_preparsed_scope_data()->HasData());
-
- // Generate the unoptimized bytecode or asm-js data.
- std::unique_ptr<CompilationJob> outer_function_job(
- GenerateUnoptimizedCode(parse_info, allocator, inner_function_jobs));
- return outer_function_job;
+ScriptCompiler::ScriptStreamingTask* Compiler::NewBackgroundCompileTask(
+ ScriptStreamingData* source, Isolate* isolate) {
+ return new BackgroundCompileTask(source, isolate);
}
-Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForBackgroundCompile(
- Handle<Script> script, ParseInfo* parse_info, int source_length,
- CompilationJob* outer_function_job,
- CompilationJobList* inner_function_jobs) {
- Isolate* isolate = script->GetIsolate();
+MaybeHandle<SharedFunctionInfo>
+Compiler::GetSharedFunctionInfoForStreamedScript(
+ Handle<String> source, const ScriptDetails& script_details,
+ ScriptOriginOptions origin_options, ScriptStreamingData* streaming_data) {
+ Isolate* isolate = source->GetIsolate();
ScriptCompileTimerScope compile_timer(
isolate, ScriptCompiler::kNoCacheBecauseStreamingSource);
PostponeInterruptsScope postpone(isolate);
- // TODO(titzer): increment the counters in caller.
+ int source_length = source->length();
isolate->counters()->total_load_size()->Increment(source_length);
isolate->counters()->total_compile_size()->Increment(source_length);
- if (outer_function_job == nullptr) {
- // Compilation failed on background thread - throw an exception.
- if (!isolate->has_pending_exception()) isolate->StackOverflow();
- return Handle<SharedFunctionInfo>();
- }
-
- Handle<SharedFunctionInfo> result;
- if (FinalizeTopLevel(parse_info, isolate, outer_function_job,
- inner_function_jobs)
- .ToHandle(&result)) {
- isolate->debug()->OnAfterCompile(script);
- }
- return result;
-}
+ ParseInfo* parse_info = streaming_data->info.get();
+ parse_info->UpdateBackgroundParseStatisticsOnMainThread(isolate);
-Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForStreamedScript(
- Handle<Script> script, ParseInfo* parse_info, int source_length) {
- Isolate* isolate = script->GetIsolate();
- ScriptCompileTimerScope compile_timer(
- isolate, ScriptCompiler::kNoCacheBecauseStreamingSource);
- // TODO(titzer): increment the counters in caller.
- isolate->counters()->total_load_size()->Increment(source_length);
- isolate->counters()->total_compile_size()->Increment(source_length);
+ // Check if the compilation cache already holds the SFI; if so, there is no
+ // need to finalize the code compiled on the background thread.
+ CompilationCache* compilation_cache = isolate->compilation_cache();
+ MaybeHandle<SharedFunctionInfo> maybe_result =
+ compilation_cache->LookupScript(
+ source, script_details.name_obj, script_details.line_offset,
+ script_details.column_offset, origin_options,
+ isolate->native_context(), parse_info->language_mode());
+ if (!maybe_result.is_null()) {
+ compile_timer.set_hit_isolate_cache();
+ }
+
+ if (maybe_result.is_null()) {
+ // No cache entry found, finalize compilation of the script and add it to
+ // the isolate cache.
+ Handle<Script> script = NewScript(isolate, source, script_details,
+ origin_options, NOT_NATIVES_CODE);
+ parse_info->set_script(script);
+ streaming_data->parser->UpdateStatistics(isolate, script);
+ streaming_data->parser->HandleSourceURLComments(isolate, script);
+
+ if (parse_info->literal() == nullptr) {
+ // Parsing has failed - report error messages.
+ parse_info->pending_error_handler()->ReportErrors(
+ isolate, script, parse_info->ast_value_factory());
+ } else {
+ // Parsing has succeeded - finalize compilation.
+ if (i::FLAG_background_compile) {
+ // Finalize background compilation.
+ if (streaming_data->outer_function_job) {
+ maybe_result = FinalizeTopLevel(
+ parse_info, isolate, streaming_data->outer_function_job.get(),
+ &streaming_data->inner_function_jobs);
+ } else {
+ // Compilation failed on background thread - throw an exception.
+ FailWithPendingException(
+ isolate, parse_info,
+ Compiler::ClearExceptionFlag::KEEP_EXCEPTION);
+ }
+ } else {
+ // Compilation on main thread.
+ maybe_result = CompileToplevel(parse_info, isolate);
+ }
+ }
- LanguageMode language_mode = construct_language_mode(FLAG_use_strict);
- parse_info->set_language_mode(
- stricter_language_mode(parse_info->language_mode(), language_mode));
+ // Add compiled code to the isolate cache.
+ Handle<SharedFunctionInfo> result;
+ if (maybe_result.ToHandle(&result)) {
+ compilation_cache->PutScript(source, isolate->native_context(),
+ parse_info->language_mode(), result);
+ }
+ }
+ // On success, report script compilation to debugger.
Handle<SharedFunctionInfo> result;
- if (CompileToplevel(parse_info, isolate).ToHandle(&result)) {
- isolate->debug()->OnAfterCompile(script);
+ if (maybe_result.ToHandle(&result)) {
+ isolate->debug()->OnAfterCompile(handle(Script::cast(result->script())));
}
- return result;
+
+ streaming_data->Release();
+ return maybe_result;
}
Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
@@ -1789,8 +1878,7 @@ void Compiler::PostInstantiation(Handle<JSFunction> function,
if (FLAG_always_opt && shared->allows_lazy_compilation() &&
!shared->optimization_disabled() && !shared->HasAsmWasmData() &&
shared->is_compiled()) {
- // TODO(mvstanton): pass pretenure flag to EnsureLiterals.
- JSFunction::EnsureLiterals(function);
+ JSFunction::EnsureFeedbackVector(function);
if (!function->IsOptimized()) {
// Only mark for optimization if we don't already have optimized code.
@@ -1801,8 +1889,7 @@ void Compiler::PostInstantiation(Handle<JSFunction> function,
}
if (shared->is_compiled() && !shared->HasAsmWasmData()) {
- // TODO(mvstanton): pass pretenure flag to EnsureLiterals.
- JSFunction::EnsureLiterals(function);
+ JSFunction::EnsureFeedbackVector(function);
Code* code = function->feedback_vector()->optimized_code();
if (code != nullptr) {
@@ -1814,5 +1901,22 @@ void Compiler::PostInstantiation(Handle<JSFunction> function,
}
}
+// ----------------------------------------------------------------------------
+// Implementation of ScriptStreamingData
+
+ScriptStreamingData::ScriptStreamingData(
+ ScriptCompiler::ExternalSourceStream* source_stream,
+ ScriptCompiler::StreamedSource::Encoding encoding)
+ : source_stream(source_stream), encoding(encoding) {}
+
+ScriptStreamingData::~ScriptStreamingData() {}
+
+void ScriptStreamingData::Release() {
+ parser.reset();
+ info.reset();
+ outer_function_job.reset();
+ inner_function_jobs.clear();
+}
+
} // namespace internal
} // namespace v8
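
Taken together, the compiler.cc hunks above collapse the old produce-parser-cache/produce-code-cache branches into a single lookup-then-compile flow: consult the per-isolate compilation cache, then try to deserialize an embedder-provided code cache (promoting a hit into the isolate cache), and only then compile from source and fill the cache. The following is a minimal, self-contained sketch of that ordering in plain C++17; it is an analogy only (simplified keying on the source string, no V8 types), not the V8 implementation.

// Analogy of the lookup order in the new GetSharedFunctionInfoForScript.
// "serialized" stands in for the embedder's code cache.
#include <optional>
#include <string>
#include <unordered_map>

struct Compiled { std::string bytecode; };

class ScriptCacheModel {
 public:
  Compiled GetOrCompile(const std::string& source,
                        const std::optional<std::string>& serialized) {
    // 1. Per-isolate compilation cache.
    auto it = isolate_cache_.find(source);
    if (it != isolate_cache_.end()) return it->second;
    // 2. Embedder-provided code cache; promote to the isolate cache on success.
    if (serialized) {
      Compiled deserialized{*serialized};
      isolate_cache_.emplace(source, deserialized);
      return deserialized;
    }
    // 3. Full compile, then fill the isolate cache for later lookups.
    Compiled compiled{"compiled:" + source};
    isolate_cache_.emplace(source, compiled);
    return compiled;
  }

 private:
  std::unordered_map<std::string, Compiled> isolate_cache_;
};
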
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index b84134c14e..ca6b0893d0 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -13,6 +13,7 @@
#include "src/code-events.h"
#include "src/contexts.h"
#include "src/isolate.h"
+#include "src/unicode-cache.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -23,11 +24,9 @@ class CompilationInfo;
class CompilationJob;
class JavaScriptFrame;
class ParseInfo;
+class Parser;
class ScriptData;
-template <typename T>
-class ThreadedList;
-template <typename T>
-class ThreadedListZoneEntry;
+struct ScriptStreamingData;
typedef std::forward_list<std::unique_ptr<CompilationJob>> CompilationJobList;
@@ -57,11 +56,12 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
static bool CompileOptimized(Handle<JSFunction> function, ConcurrencyMode);
static MaybeHandle<JSArray> CompileForLiveEdit(Handle<Script> script);
- // Compile top level code on a background thread. Should be finalized by
- // GetSharedFunctionInfoForBackgroundCompile.
- static std::unique_ptr<CompilationJob> CompileTopLevelOnBackgroundThread(
- ParseInfo* parse_info, AccountingAllocator* allocator,
- CompilationJobList* inner_function_jobs);
+ // Creates a new task that, when run, will parse and compile the streamed
+ // script associated with |streaming_data|; the result can be finalized with
+ // Compiler::GetSharedFunctionInfoForStreamedScript.
+ // Note: does not take ownership of streaming_data.
+ static ScriptCompiler::ScriptStreamingTask* NewBackgroundCompileTask(
+ ScriptStreamingData* streaming_data, Isolate* isolate);
// Generate and install code from previously queued compilation job.
static bool FinalizeCompilationJob(CompilationJob* job, Isolate* isolate);
@@ -71,17 +71,12 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
// offer this chance, optimized closure instantiation will not call this.
static void PostInstantiation(Handle<JSFunction> function, PretenureFlag);
- typedef ThreadedList<ThreadedListZoneEntry<FunctionLiteral*>>
- EagerInnerFunctionLiterals;
-
// Parser::Parse, then Compiler::Analyze.
static bool ParseAndAnalyze(ParseInfo* parse_info,
Handle<SharedFunctionInfo> shared_info,
Isolate* isolate);
- // Rewrite, analyze scopes, and renumber. If |eager_literals| is non-null, it
- // is appended with inner function literals which should be eagerly compiled.
- static bool Analyze(ParseInfo* parse_info,
- EagerInnerFunctionLiterals* eager_literals = nullptr);
+ // Rewrite and analyze scopes.
+ static bool Analyze(ParseInfo* parse_info);
// ===========================================================================
// The following family of methods instantiates new functions for scripts or
@@ -120,28 +115,34 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
Handle<Context> context, Handle<String> source,
ParseRestriction restriction, int parameters_end_pos);
- // Create a shared function info object for a String source within a context.
+ struct ScriptDetails {
+ ScriptDetails() : line_offset(0), column_offset(0) {}
+ explicit ScriptDetails(Handle<Object> script_name)
+ : line_offset(0), column_offset(0), name_obj(script_name) {}
+
+ int line_offset;
+ int column_offset;
+ i::MaybeHandle<i::Object> name_obj;
+ i::MaybeHandle<i::Object> source_map_url;
+ i::MaybeHandle<i::FixedArray> host_defined_options;
+ };
+
+ // Create a shared function info object for a String source.
static MaybeHandle<SharedFunctionInfo> GetSharedFunctionInfoForScript(
- Handle<String> source, MaybeHandle<Object> maybe_script_name,
- int line_offset, int column_offset, ScriptOriginOptions resource_options,
- MaybeHandle<Object> maybe_source_map_url, Handle<Context> context,
- v8::Extension* extension, ScriptData** cached_data,
- ScriptCompiler::CompileOptions compile_options,
+ Handle<String> source, const ScriptDetails& script_details,
+ ScriptOriginOptions origin_options, v8::Extension* extension,
+ ScriptData** cached_data, ScriptCompiler::CompileOptions compile_options,
ScriptCompiler::NoCacheReason no_cache_reason,
- NativesFlag is_natives_code,
- MaybeHandle<FixedArray> maybe_host_defined_options);
-
- // Create a shared function info object for a Script that has already been
- // parsed while the script was being loaded from a streamed source.
- static Handle<SharedFunctionInfo> GetSharedFunctionInfoForStreamedScript(
- Handle<Script> script, ParseInfo* info, int source_length);
+ NativesFlag is_natives_code);
- // Create a shared function info object for a Script that has already been
- // compiled on a background thread.
- static Handle<SharedFunctionInfo> GetSharedFunctionInfoForBackgroundCompile(
- Handle<Script> script, ParseInfo* parse_info, int source_length,
- CompilationJob* outer_function_job,
- CompilationJobList* inner_function_jobs);
+ // Create a shared function info object for a Script source that has already
+ // been parsed and possibly compiled on a background thread while being loaded
+ // from a streamed source. On return, the data held by |streaming_data| will
+ // have been released; however, the object itself isn't freed and is still
+ // owned by the caller.
+ static MaybeHandle<SharedFunctionInfo> GetSharedFunctionInfoForStreamedScript(
+ Handle<String> source, const ScriptDetails& script_details,
+ ScriptOriginOptions origin_options, ScriptStreamingData* streaming_data);
// Create a shared function info object for the given function literal
// node (the code may be lazily compiled).
@@ -246,6 +247,34 @@ class V8_EXPORT_PRIVATE CompilationJob {
}
};
+// Contains all data which needs to be transmitted between threads for
+// background parsing and compiling, and for finalizing that work on the
+// main thread.
+struct ScriptStreamingData {
+ ScriptStreamingData(ScriptCompiler::ExternalSourceStream* source_stream,
+ ScriptCompiler::StreamedSource::Encoding encoding);
+ ~ScriptStreamingData();
+
+ void Release();
+
+ // Internal implementation of v8::ScriptCompiler::StreamedSource.
+ std::unique_ptr<ScriptCompiler::ExternalSourceStream> source_stream;
+ ScriptCompiler::StreamedSource::Encoding encoding;
+ std::unique_ptr<ScriptCompiler::CachedData> cached_data;
+
+ // Data needed for parsing, and data needed to be passed between threads
+ // between parsing and compilation. These need to be initialized before the
+ // compilation starts.
+ UnicodeCache unicode_cache;
+ std::unique_ptr<ParseInfo> info;
+ std::unique_ptr<Parser> parser;
+
+ // Data needed for finalizing compilation after background compilation.
+ std::unique_ptr<CompilationJob> outer_function_job;
+ CompilationJobList inner_function_jobs;
+
+ DISALLOW_COPY_AND_ASSIGN(ScriptStreamingData);
+};
+
} // namespace internal
} // namespace v8
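
The new streaming entry points declared above are intended to be used as a pair: build a ScriptStreamingData, hand it to a background task, then finalize on the main thread. The fragment below sketches that sequence using only the declarations from this header; it is illustrative and will not compile standalone (it needs V8-internal headers and handles), and RunOnWorkerThread is a hypothetical helper standing in for whatever worker-thread mechanism the embedder uses.

// Illustrative sequence only; RunOnWorkerThread is a hypothetical stand-in.
ScriptStreamingData streaming_data(source_stream, encoding);
ScriptCompiler::ScriptStreamingTask* task =
    Compiler::NewBackgroundCompileTask(&streaming_data, isolate);  // no ownership taken
RunOnWorkerThread(task);  // task->Run() parses and possibly compiles off the main thread

// Later, on the main thread, once the full source string is available:
MaybeHandle<SharedFunctionInfo> maybe_sfi =
    Compiler::GetSharedFunctionInfoForStreamedScript(
        source, Compiler::ScriptDetails(script_name), origin_options,
        &streaming_data);
// On return, the data held by |streaming_data| has been released, but the
// object itself is still owned by this caller.
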
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index 2e9052e0c3..f250db84b9 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -5,16 +5,16 @@ jarin@chromium.org
mstarzinger@chromium.org
titzer@chromium.org
danno@chromium.org
+sigurds@chromium.org
tebbi@chromium.org
neis@chromium.org
mvstanton@chromium.org
# For backend
bbudge@chromium.org
-mtrofin@chromium.org
+gdeepti@chromium.org
per-file wasm-*=ahaas@chromium.org
-per-file wasm-*=bbudge@chromium.org
per-file wasm-*=bradnelson@chromium.org
per-file wasm-*=clemensh@chromium.org
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 13d6801c32..e187d7170c 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -164,8 +164,8 @@ FieldAccess AccessBuilder::ForJSFunctionSharedFunctionInfo() {
}
// static
-FieldAccess AccessBuilder::ForJSFunctionFeedbackVector() {
- FieldAccess access = {kTaggedBase, JSFunction::kFeedbackVectorOffset,
+FieldAccess AccessBuilder::ForJSFunctionFeedbackCell() {
+ FieldAccess access = {kTaggedBase, JSFunction::kFeedbackCellOffset,
Handle<Name>(), MaybeHandle<Map>(),
Type::Internal(), MachineType::TaggedPointer(),
kPointerWriteBarrier};
@@ -289,12 +289,12 @@ FieldAccess AccessBuilder::ForJSAsyncGeneratorObjectQueue() {
}
// static
-FieldAccess AccessBuilder::ForJSAsyncGeneratorObjectAwaitedPromise() {
+FieldAccess AccessBuilder::ForJSAsyncGeneratorObjectIsAwaiting() {
FieldAccess access = {
- kTaggedBase, JSAsyncGeneratorObject::kAwaitedPromiseOffset,
+ kTaggedBase, JSAsyncGeneratorObject::kIsAwaitingOffset,
Handle<Name>(), MaybeHandle<Map>(),
- Type::NonInternal(), MachineType::AnyTagged(),
- kFullWriteBarrier};
+ Type::SignedSmall(), MachineType::TaggedSigned(),
+ kNoWriteBarrier};
return access;
}
@@ -1001,6 +1001,10 @@ ElementAccess AccessBuilder::ForTypedArrayElement(ExternalArrayType type,
MachineType::Float64(), kNoWriteBarrier};
return access;
}
+ case kExternalBigInt64Array:
+ case kExternalBigUint64Array:
+ // TODO(neis/jkummerow): Define appropriate types.
+ UNIMPLEMENTED();
}
UNREACHABLE();
}
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index a2ce1f800b..fb8535c167 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -70,8 +70,8 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSFunction::shared() field.
static FieldAccess ForJSFunctionSharedFunctionInfo();
- // Provides access to JSFunction::feedback_vector() field.
- static FieldAccess ForJSFunctionFeedbackVector();
+ // Provides access to JSFunction::feedback_cell() field.
+ static FieldAccess ForJSFunctionFeedbackCell();
// Provides access to JSFunction::code() field.
static FieldAccess ForJSFunctionCode();
@@ -109,8 +109,8 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSAsyncGeneratorObject::queue() field.
static FieldAccess ForJSAsyncGeneratorObjectQueue();
- // Provides access to JSAsyncGeneratorObject::awaited_promise() field.
- static FieldAccess ForJSAsyncGeneratorObjectAwaitedPromise();
+ // Provides access to JSAsyncGeneratorObject::is_awaiting() field.
+ static FieldAccess ForJSAsyncGeneratorObjectIsAwaiting();
// Provides access to JSArray::length() field.
static FieldAccess ForJSArrayLength(ElementsKind elements_kind);
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 9b0c4b41b1..c1254e4cdb 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -25,7 +25,11 @@ bool CanInlineElementAccess(Handle<Map> map) {
if (map->has_indexed_interceptor()) return false;
ElementsKind const elements_kind = map->elements_kind();
if (IsFastElementsKind(elements_kind)) return true;
- if (IsFixedTypedArrayElementsKind(elements_kind)) return true;
+ if (IsFixedTypedArrayElementsKind(elements_kind) &&
+ elements_kind != BIGUINT64_ELEMENTS &&
+ elements_kind != BIGINT64_ELEMENTS) {
+ return true;
+ }
return false;
}
@@ -533,6 +537,18 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
return false;
}
+bool AccessInfoFactory::ComputePropertyAccessInfo(
+ MapHandles const& maps, Handle<Name> name, AccessMode access_mode,
+ PropertyAccessInfo* access_info) {
+ ZoneVector<PropertyAccessInfo> access_infos(zone());
+ if (ComputePropertyAccessInfos(maps, name, access_mode, &access_infos) &&
+ access_infos.size() == 1) {
+ *access_info = access_infos.front();
+ return true;
+ }
+ return false;
+}
+
bool AccessInfoFactory::ComputePropertyAccessInfos(
MapHandles const& maps, Handle<Name> name, AccessMode access_mode,
ZoneVector<PropertyAccessInfo>* access_infos) {
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index dcdb0f35f0..54d402738b 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -149,6 +149,9 @@ class AccessInfoFactory final {
bool ComputePropertyAccessInfo(Handle<Map> map, Handle<Name> name,
AccessMode access_mode,
PropertyAccessInfo* access_info);
+ bool ComputePropertyAccessInfo(MapHandles const& maps, Handle<Name> name,
+ AccessMode access_mode,
+ PropertyAccessInfo* access_info);
bool ComputePropertyAccessInfos(MapHandles const& maps, Handle<Name> name,
AccessMode access_mode,
ZoneVector<PropertyAccessInfo>* access_infos);
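
The new ComputePropertyAccessInfo overload added above is a convenience wrapper: it computes access infos for all receiver maps and reports success only when they collapse to a single PropertyAccessInfo. A hypothetical call site might look like the following (simplified; assumes a factory, receiver maps, and a name are already in scope).

// Hypothetical call site, simplified for illustration.
PropertyAccessInfo access_info;
if (factory.ComputePropertyAccessInfo(receiver_maps, name, AccessMode::kLoad,
                                      &access_info)) {
  // Every receiver map agreed on one PropertyAccessInfo; specialize the load.
} else {
  // Maps disagree or lookup failed; keep the generic/polymorphic path.
}
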
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index a238cf29d4..8636c639e0 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -21,9 +21,6 @@ namespace compiler {
#define __ tasm()->
-#define kScratchReg r9
-
-
// Adds Arm-specific methods to convert InstructionOperands.
class ArmOperandConverter final : public InstructionOperandConverter {
public:
@@ -33,7 +30,9 @@ class ArmOperandConverter final : public InstructionOperandConverter {
SBit OutputSBit() const {
switch (instr_->flags_mode()) {
case kFlags_branch:
+ case kFlags_branch_and_poison:
case kFlags_deoptimize:
+ case kFlags_deoptimize_and_poison:
case kFlags_set:
case kFlags_trap:
return SetCC;
@@ -44,21 +43,7 @@ class ArmOperandConverter final : public InstructionOperandConverter {
}
Operand InputImmediate(size_t index) {
- Constant constant = ToConstant(instr_->InputAt(index));
- switch (constant.type()) {
- case Constant::kInt32:
- return Operand(constant.ToInt32());
- case Constant::kFloat32:
- return Operand::EmbeddedNumber(constant.ToFloat32());
- case Constant::kFloat64:
- return Operand::EmbeddedNumber(constant.ToFloat64().value());
- case Constant::kInt64:
- case Constant::kExternalReference:
- case Constant::kHeapObject:
- case Constant::kRpoNumber:
- break;
- }
- UNREACHABLE();
+ return ToImmediate(instr_->InputAt(index));
}
Operand InputOperand2(size_t first_index) {
@@ -124,6 +109,30 @@ class ArmOperandConverter final : public InstructionOperandConverter {
return InputOffset(&first_index);
}
+ Operand ToImmediate(InstructionOperand* operand) {
+ Constant constant = ToConstant(operand);
+ switch (constant.type()) {
+ case Constant::kInt32:
+ if (RelocInfo::IsWasmReference(constant.rmode())) {
+ return Operand(constant.ToInt32(), constant.rmode());
+ } else {
+ return Operand(constant.ToInt32());
+ }
+ case Constant::kFloat32:
+ return Operand::EmbeddedNumber(constant.ToFloat32());
+ case Constant::kFloat64:
+ return Operand::EmbeddedNumber(constant.ToFloat64().value());
+ case Constant::kExternalReference:
+ return Operand(constant.ToExternalReference());
+ case Constant::kInt64:
+ case Constant::kHeapObject:
+ // TODO(dcarney): loading RPO constants on arm.
+ case Constant::kRpoNumber:
+ break;
+ }
+ UNREACHABLE();
+ }
+
MemOperand ToMemOperand(InstructionOperand* op) const {
DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
@@ -314,6 +323,17 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
UNREACHABLE();
}
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
+ InstructionCode opcode, Instruction* instr,
+ ArmOperandConverter& i) {
+ const MemoryAccessMode access_mode =
+ static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ if (access_mode == kMemoryAccessPoisoned) {
+ Register value = i.OutputRegister();
+ codegen->tasm()->and_(value, value, Operand(kSpeculationPoisonRegister));
+ }
+}
+
} // namespace
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
@@ -571,28 +591,54 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
first_unused_stack_slot);
}
+// Check that {kJavaScriptCallCodeStartRegister} is correct.
+void CodeGenerator::AssembleCodeStartRegisterCheck() {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ ComputeCodeStartAddress(scratch);
+ __ cmp(scratch, kJavaScriptCallCodeStartRegister);
+ __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
+}
+
// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
-// 1. compute the offset of the {CodeDataContainer} from our current location
-// and load it.
-// 2. read from memory the word that contains that bit, which can be found in
+// 1. read from memory the word that contains that bit, which can be found in
// the flags in the referenced {CodeDataContainer} object;
-// 3. test kMarkedForDeoptimizationBit in those flags; and
-// 4. if it is not zero then it jumps to the builtin.
+// 2. test kMarkedForDeoptimizationBit in those flags; and
+// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
- int pc_offset = __ pc_offset();
- int offset = Code::kCodeDataContainerOffset -
- (Code::kHeaderSize + pc_offset + TurboAssembler::kPcLoadDelta);
- // We can use the register pc - 8 for the address of the current instruction.
- __ ldr_pcrel(ip, offset);
- __ ldr(ip, FieldMemOperand(ip, CodeDataContainer::kKindSpecificFlagsOffset));
- __ tst(ip, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+ __ ldr(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
+ __ ldr(scratch,
+ FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+ __ tst(scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
Handle<Code> code = isolate()->builtins()->builtin_handle(
Builtins::kCompileLazyDeoptimizedCode);
__ Jump(code, RelocInfo::CODE_TARGET, ne);
}
+void CodeGenerator::GenerateSpeculationPoison() {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+
+ // Set a mask which has all bits set in the normal case, but has all
+ // bits cleared if we are speculatively executing the wrong PC.
+ __ ComputeCodeStartAddress(scratch);
+ __ cmp(kJavaScriptCallCodeStartRegister, scratch);
+ __ mov(kSpeculationPoisonRegister, Operand(-1), SBit::LeaveCC, eq);
+ __ mov(kSpeculationPoisonRegister, Operand(0), SBit::LeaveCC, ne);
+ __ csdb();
+}
+
+void CodeGenerator::AssembleRegisterArgumentPoisoning() {
+ __ and_(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
+ __ and_(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
+ __ and_(sp, sp, kSpeculationPoisonRegister);
+}
+
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -611,9 +657,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsImmediate()) {
__ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
- __ add(ip, i.InputRegister(0),
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ add(scratch, i.InputRegister(0),
Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(ip);
+ __ Call(scratch);
}
RecordCallPosition(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -657,9 +705,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
- __ add(ip, i.InputRegister(0),
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ add(scratch, i.InputRegister(0),
Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(ip);
+ __ Jump(scratch);
}
DCHECK_EQ(LeaveCC, i.OutputSBit());
unwinding_info_writer_.MarkBlockWillExit();
@@ -701,14 +751,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchCallJSFunction: {
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
// Check the function's context matches the context argument.
- __ ldr(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
- __ cmp(cp, kScratchReg);
+ __ ldr(scratch, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ cmp(cp, scratch);
__ Assert(eq, AbortReason::kWrongFunctionContext);
}
- __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeOffset));
- __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+ __ ldr(r2, FieldMemOperand(func, JSFunction::kCodeOffset));
+ __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(r2);
RecordCallPosition(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
frame_access_state()->ClearSPDelta();
@@ -1154,7 +1207,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputRegister(1), i.InputInt32(2));
} else {
__ LslPair(i.OutputRegister(0), second_output, i.InputRegister(0),
- i.InputRegister(1), kScratchReg, i.InputRegister(2));
+ i.InputRegister(1), i.InputRegister(2));
}
break;
}
@@ -1166,7 +1219,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputRegister(1), i.InputInt32(2));
} else {
__ LsrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
- i.InputRegister(1), kScratchReg, i.InputRegister(2));
+ i.InputRegister(1), i.InputRegister(2));
}
break;
}
@@ -1178,7 +1231,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputRegister(1), i.InputInt32(2));
} else {
__ AsrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
- i.InputRegister(1), kScratchReg, i.InputRegister(2));
+ i.InputRegister(1), i.InputRegister(2));
}
break;
}
@@ -1354,35 +1407,40 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVcvtF32S32: {
- SwVfpRegister scratch = kScratchDoubleReg.low();
+ UseScratchRegisterScope temps(tasm());
+ SwVfpRegister scratch = temps.AcquireS();
__ vmov(scratch, i.InputRegister(0));
__ vcvt_f32_s32(i.OutputFloatRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtF32U32: {
- SwVfpRegister scratch = kScratchDoubleReg.low();
+ UseScratchRegisterScope temps(tasm());
+ SwVfpRegister scratch = temps.AcquireS();
__ vmov(scratch, i.InputRegister(0));
__ vcvt_f32_u32(i.OutputFloatRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtF64S32: {
- SwVfpRegister scratch = kScratchDoubleReg.low();
+ UseScratchRegisterScope temps(tasm());
+ SwVfpRegister scratch = temps.AcquireS();
__ vmov(scratch, i.InputRegister(0));
__ vcvt_f64_s32(i.OutputDoubleRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtF64U32: {
- SwVfpRegister scratch = kScratchDoubleReg.low();
+ UseScratchRegisterScope temps(tasm());
+ SwVfpRegister scratch = temps.AcquireS();
__ vmov(scratch, i.InputRegister(0));
__ vcvt_f64_u32(i.OutputDoubleRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtS32F32: {
- SwVfpRegister scratch = kScratchDoubleReg.low();
+ UseScratchRegisterScope temps(tasm());
+ SwVfpRegister scratch = temps.AcquireS();
__ vcvt_s32_f32(scratch, i.InputFloatRegister(0));
__ vmov(i.OutputRegister(), scratch);
// Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
@@ -1393,7 +1451,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVcvtU32F32: {
- SwVfpRegister scratch = kScratchDoubleReg.low();
+ UseScratchRegisterScope temps(tasm());
+ SwVfpRegister scratch = temps.AcquireS();
__ vcvt_u32_f32(scratch, i.InputFloatRegister(0));
__ vmov(i.OutputRegister(), scratch);
// Avoid UINT32_MAX as an overflow indicator and use 0 instead,
@@ -1404,14 +1463,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVcvtS32F64: {
- SwVfpRegister scratch = kScratchDoubleReg.low();
+ UseScratchRegisterScope temps(tasm());
+ SwVfpRegister scratch = temps.AcquireS();
__ vcvt_s32_f64(scratch, i.InputDoubleRegister(0));
__ vmov(i.OutputRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtU32F64: {
- SwVfpRegister scratch = kScratchDoubleReg.low();
+ UseScratchRegisterScope temps(tasm());
+ SwVfpRegister scratch = temps.AcquireS();
__ vcvt_u32_f64(scratch, i.InputDoubleRegister(0));
__ vmov(i.OutputRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -1453,10 +1514,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmLdrb:
__ ldrb(i.OutputRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArmLdrsb:
__ ldrsb(i.OutputRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArmStrb:
__ strb(i.InputRegister(0), i.InputOffset(1));
@@ -1464,9 +1527,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArmLdrh:
__ ldrh(i.OutputRegister(), i.InputOffset());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArmLdrsh:
__ ldrsh(i.OutputRegister(), i.InputOffset());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArmStrh:
__ strh(i.InputRegister(0), i.InputOffset(1));
@@ -1474,6 +1539,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArmLdr:
__ ldr(i.OutputRegister(), i.InputOffset());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArmStr:
__ str(i.InputRegister(0), i.InputOffset(1));
@@ -1629,6 +1695,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kArmDsbIsb: {
+ __ dsb(SY);
+ __ isb(SY);
+ break;
+ }
case kArmF32x4Splat: {
int src_code = i.InputFloatRegister(0).code();
__ vdup(Neon32, i.OutputSimd128Register(),
@@ -2202,41 +2273,50 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
DCHECK(dst == i.InputSimd128Register(0));
+ UseScratchRegisterScope temps(tasm());
+ Simd128Register scratch = temps.AcquireQ();
// src0 = [0, 1, 2, 3], src1 = [4, 5, 6, 7]
- __ vmov(kScratchQuadReg, src1);
- __ vuzp(Neon32, dst, kScratchQuadReg); // dst = [0, 2, 4, 6]
+ __ vmov(scratch, src1);
+ __ vuzp(Neon32, dst, scratch); // dst = [0, 2, 4, 6]
break;
}
case kArmS32x4UnzipRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
DCHECK(dst == i.InputSimd128Register(0));
+ UseScratchRegisterScope temps(tasm());
+ Simd128Register scratch = temps.AcquireQ();
// src0 = [4, 5, 6, 7], src1 = [0, 1, 2, 3] (flipped from UnzipLeft).
- __ vmov(kScratchQuadReg, src1);
- __ vuzp(Neon32, kScratchQuadReg, dst); // dst = [1, 3, 5, 7]
+ __ vmov(scratch, src1);
+ __ vuzp(Neon32, scratch, dst); // dst = [1, 3, 5, 7]
break;
}
case kArmS32x4TransposeLeft: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
DCHECK(dst == i.InputSimd128Register(0));
+ UseScratchRegisterScope temps(tasm());
+ Simd128Register scratch = temps.AcquireQ();
// src0 = [0, 1, 2, 3], src1 = [4, 5, 6, 7]
- __ vmov(kScratchQuadReg, src1);
- __ vtrn(Neon32, dst, kScratchQuadReg); // dst = [0, 4, 2, 6]
+ __ vmov(scratch, src1);
+ __ vtrn(Neon32, dst, scratch); // dst = [0, 4, 2, 6]
break;
}
case kArmS32x4Shuffle: {
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
+ UseScratchRegisterScope temps(tasm());
// Check for in-place shuffles.
// If dst == src0 == src1, then the shuffle is unary and we only use src0.
if (dst == src0) {
- __ vmov(kScratchQuadReg, src0);
- src0 = kScratchQuadReg;
+ Simd128Register scratch = temps.AcquireQ();
+ __ vmov(scratch, src0);
+ src0 = scratch;
} else if (dst == src1) {
- __ vmov(kScratchQuadReg, src1);
- src1 = kScratchQuadReg;
+ Simd128Register scratch = temps.AcquireQ();
+ __ vmov(scratch, src1);
+ src1 = scratch;
}
// Perform shuffle as a vmov per lane.
int dst_code = dst.code() * 4;
@@ -2258,10 +2338,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS32x4TransposeRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
+ UseScratchRegisterScope temps(tasm());
+ Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [4, 5, 6, 7], src1 = [0, 1, 2, 3] (flipped from TransposeLeft).
- __ vmov(kScratchQuadReg, src1);
- __ vtrn(Neon32, kScratchQuadReg, dst); // dst = [1, 5, 3, 7]
+ __ vmov(scratch, src1);
+ __ vtrn(Neon32, scratch, dst); // dst = [1, 5, 3, 7]
break;
}
case kArmS16x8ZipLeft: {
@@ -2285,37 +2367,45 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS16x8UnzipLeft: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
+ UseScratchRegisterScope temps(tasm());
+ Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [0, 1, 2, 3, ... 7], src1 = [8, 9, 10, 11, ... 15]
- __ vmov(kScratchQuadReg, src1);
- __ vuzp(Neon16, dst, kScratchQuadReg); // dst = [0, 2, 4, 6, ... 14]
+ __ vmov(scratch, src1);
+ __ vuzp(Neon16, dst, scratch); // dst = [0, 2, 4, 6, ... 14]
break;
}
case kArmS16x8UnzipRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
+ UseScratchRegisterScope temps(tasm());
+ Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [8, 9, 10, 11, ... 15], src1 = [0, 1, 2, 3, ... 7] (flipped).
- __ vmov(kScratchQuadReg, src1);
- __ vuzp(Neon16, kScratchQuadReg, dst); // dst = [1, 3, 5, 7, ... 15]
+ __ vmov(scratch, src1);
+ __ vuzp(Neon16, scratch, dst); // dst = [1, 3, 5, 7, ... 15]
break;
}
case kArmS16x8TransposeLeft: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
+ UseScratchRegisterScope temps(tasm());
+ Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [0, 1, 2, 3, ... 7], src1 = [8, 9, 10, 11, ... 15]
- __ vmov(kScratchQuadReg, src1);
- __ vtrn(Neon16, dst, kScratchQuadReg); // dst = [0, 8, 2, 10, ... 14]
+ __ vmov(scratch, src1);
+ __ vtrn(Neon16, dst, scratch); // dst = [0, 8, 2, 10, ... 14]
break;
}
case kArmS16x8TransposeRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
+ UseScratchRegisterScope temps(tasm());
+ Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [8, 9, 10, 11, ... 15], src1 = [0, 1, 2, 3, ... 7] (flipped).
- __ vmov(kScratchQuadReg, src1);
- __ vtrn(Neon16, kScratchQuadReg, dst); // dst = [1, 9, 3, 11, ... 15]
+ __ vmov(scratch, src1);
+ __ vtrn(Neon16, scratch, dst); // dst = [1, 9, 3, 11, ... 15]
break;
}
case kArmS8x16ZipLeft: {
@@ -2339,37 +2429,45 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS8x16UnzipLeft: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
+ UseScratchRegisterScope temps(tasm());
+ Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [0, 1, 2, 3, ... 15], src1 = [16, 17, 18, 19, ... 31]
- __ vmov(kScratchQuadReg, src1);
- __ vuzp(Neon8, dst, kScratchQuadReg); // dst = [0, 2, 4, 6, ... 30]
+ __ vmov(scratch, src1);
+ __ vuzp(Neon8, dst, scratch); // dst = [0, 2, 4, 6, ... 30]
break;
}
case kArmS8x16UnzipRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
+ UseScratchRegisterScope temps(tasm());
+ Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [16, 17, 18, 19, ... 31], src1 = [0, 1, 2, 3, ... 15] (flipped).
- __ vmov(kScratchQuadReg, src1);
- __ vuzp(Neon8, kScratchQuadReg, dst); // dst = [1, 3, 5, 7, ... 31]
+ __ vmov(scratch, src1);
+ __ vuzp(Neon8, scratch, dst); // dst = [1, 3, 5, 7, ... 31]
break;
}
case kArmS8x16TransposeLeft: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
+ UseScratchRegisterScope temps(tasm());
+ Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [0, 1, 2, 3, ... 15], src1 = [16, 17, 18, 19, ... 31]
- __ vmov(kScratchQuadReg, src1);
- __ vtrn(Neon8, dst, kScratchQuadReg); // dst = [0, 16, 2, 18, ... 30]
+ __ vmov(scratch, src1);
+ __ vtrn(Neon8, dst, scratch); // dst = [0, 16, 2, 18, ... 30]
break;
}
case kArmS8x16TransposeRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
+ UseScratchRegisterScope temps(tasm());
+ Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [16, 17, 18, 19, ... 31], src1 = [0, 1, 2, 3, ... 15] (flipped).
- __ vmov(kScratchQuadReg, src1);
- __ vtrn(Neon8, kScratchQuadReg, dst); // dst = [1, 17, 3, 19, ... 31]
+ __ vmov(scratch, src1);
+ __ vtrn(Neon8, scratch, dst); // dst = [1, 17, 3, 19, ... 31]
break;
}
case kArmS8x16Concat: {
@@ -2382,12 +2480,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
DwVfpRegister table_base = src0.low();
+ UseScratchRegisterScope temps(tasm());
+ Simd128Register scratch = temps.AcquireQ();
// If unary shuffle, table is src0 (2 d-registers), otherwise src0 and
// src1. They must be consecutive.
int table_size = src0 == src1 ? 2 : 4;
DCHECK_IMPLIES(src0 != src1, src0.code() + 1 == src1.code());
- // The shuffle lane mask is a byte mask, materialize in kScratchQuadReg.
- int scratch_s_base = kScratchQuadReg.code() * 4;
+ // The shuffle lane mask is a byte mask, materialize in scratch.
+ int scratch_s_base = scratch.code() * 4;
for (int j = 0; j < 4; j++) {
uint32_t four_lanes = i.InputUint32(2 + j);
// Ensure byte indices are in [0, 31] so masks are never NaNs.
@@ -2397,12 +2497,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
NeonListOperand table(table_base, table_size);
if (dst != src0 && dst != src1) {
- __ vtbl(dst.low(), table, kScratchQuadReg.low());
- __ vtbl(dst.high(), table, kScratchQuadReg.high());
+ __ vtbl(dst.low(), table, scratch.low());
+ __ vtbl(dst.high(), table, scratch.high());
} else {
- __ vtbl(kScratchQuadReg.low(), table, kScratchQuadReg.low());
- __ vtbl(kScratchQuadReg.high(), table, kScratchQuadReg.high());
- __ vmov(dst, kScratchQuadReg);
+ __ vtbl(scratch.low(), table, scratch.low());
+ __ vtbl(scratch.high(), table, scratch.high());
+ __ vmov(dst, scratch);
}
break;
}
@@ -2432,149 +2532,156 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmS1x4AnyTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
- __ vpmax(NeonU32, kScratchDoubleReg, src.low(), src.high());
- __ vpmax(NeonU32, kScratchDoubleReg, kScratchDoubleReg,
- kScratchDoubleReg);
- __ ExtractLane(i.OutputRegister(), kScratchDoubleReg, NeonS32, 0);
+ UseScratchRegisterScope temps(tasm());
+ DwVfpRegister scratch = temps.AcquireD();
+ __ vpmax(NeonU32, scratch, src.low(), src.high());
+ __ vpmax(NeonU32, scratch, scratch, scratch);
+ __ ExtractLane(i.OutputRegister(), scratch, NeonS32, 0);
break;
}
case kArmS1x4AllTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
- __ vpmin(NeonU32, kScratchDoubleReg, src.low(), src.high());
- __ vpmin(NeonU32, kScratchDoubleReg, kScratchDoubleReg,
- kScratchDoubleReg);
- __ ExtractLane(i.OutputRegister(), kScratchDoubleReg, NeonS32, 0);
+ UseScratchRegisterScope temps(tasm());
+ DwVfpRegister scratch = temps.AcquireD();
+ __ vpmin(NeonU32, scratch, src.low(), src.high());
+ __ vpmin(NeonU32, scratch, scratch, scratch);
+ __ ExtractLane(i.OutputRegister(), scratch, NeonS32, 0);
break;
}
case kArmS1x8AnyTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
- __ vpmax(NeonU16, kScratchDoubleReg, src.low(), src.high());
- __ vpmax(NeonU16, kScratchDoubleReg, kScratchDoubleReg,
- kScratchDoubleReg);
- __ vpmax(NeonU16, kScratchDoubleReg, kScratchDoubleReg,
- kScratchDoubleReg);
- __ ExtractLane(i.OutputRegister(), kScratchDoubleReg, NeonS16, 0);
+ UseScratchRegisterScope temps(tasm());
+ DwVfpRegister scratch = temps.AcquireD();
+ __ vpmax(NeonU16, scratch, src.low(), src.high());
+ __ vpmax(NeonU16, scratch, scratch, scratch);
+ __ vpmax(NeonU16, scratch, scratch, scratch);
+ __ ExtractLane(i.OutputRegister(), scratch, NeonS16, 0);
break;
}
case kArmS1x8AllTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
- __ vpmin(NeonU16, kScratchDoubleReg, src.low(), src.high());
- __ vpmin(NeonU16, kScratchDoubleReg, kScratchDoubleReg,
- kScratchDoubleReg);
- __ vpmin(NeonU16, kScratchDoubleReg, kScratchDoubleReg,
- kScratchDoubleReg);
- __ ExtractLane(i.OutputRegister(), kScratchDoubleReg, NeonS16, 0);
+ UseScratchRegisterScope temps(tasm());
+ DwVfpRegister scratch = temps.AcquireD();
+ __ vpmin(NeonU16, scratch, src.low(), src.high());
+ __ vpmin(NeonU16, scratch, scratch, scratch);
+ __ vpmin(NeonU16, scratch, scratch, scratch);
+ __ ExtractLane(i.OutputRegister(), scratch, NeonS16, 0);
break;
}
case kArmS1x16AnyTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
- __ vpmax(NeonU8, kScratchDoubleReg, src.low(), src.high());
- __ vpmax(NeonU8, kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
- // vtst to detect any bits in the bottom 32 bits of kScratchDoubleReg.
+ UseScratchRegisterScope temps(tasm());
+ QwNeonRegister q_scratch = temps.AcquireQ();
+ DwVfpRegister d_scratch = q_scratch.low();
+ __ vpmax(NeonU8, d_scratch, src.low(), src.high());
+ __ vpmax(NeonU8, d_scratch, d_scratch, d_scratch);
+ // vtst to detect any bits in the bottom 32 bits of d_scratch.
// This saves an instruction vs. the naive sequence of vpmax.
// kDoubleRegZero is not changed, since it is 0.
- __ vtst(Neon32, kScratchQuadReg, kScratchQuadReg, kScratchQuadReg);
- __ ExtractLane(i.OutputRegister(), kScratchDoubleReg, NeonS32, 0);
+ __ vtst(Neon32, q_scratch, q_scratch, q_scratch);
+ __ ExtractLane(i.OutputRegister(), d_scratch, NeonS32, 0);
break;
}
case kArmS1x16AllTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
- __ vpmin(NeonU8, kScratchDoubleReg, src.low(), src.high());
- __ vpmin(NeonU8, kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
- __ vpmin(NeonU8, kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
- __ vpmin(NeonU8, kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
- __ ExtractLane(i.OutputRegister(), kScratchDoubleReg, NeonS8, 0);
+ UseScratchRegisterScope temps(tasm());
+ DwVfpRegister scratch = temps.AcquireD();
+ __ vpmin(NeonU8, scratch, src.low(), src.high());
+ __ vpmin(NeonU8, scratch, scratch, scratch);
+ __ vpmin(NeonU8, scratch, scratch, scratch);
+ __ vpmin(NeonU8, scratch, scratch, scratch);
+ __ ExtractLane(i.OutputRegister(), scratch, NeonS8, 0);
break;
}
- case kAtomicLoadInt8:
+ case kWord32AtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrsb);
break;
- case kAtomicLoadUint8:
+ case kWord32AtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrb);
break;
- case kAtomicLoadInt16:
+ case kWord32AtomicLoadInt16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrsh);
break;
- case kAtomicLoadUint16:
+ case kWord32AtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrh);
break;
- case kAtomicLoadWord32:
+ case kWord32AtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldr);
break;
- case kAtomicStoreWord8:
+ case kWord32AtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(strb);
break;
- case kAtomicStoreWord16:
+ case kWord32AtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(strh);
break;
- case kAtomicStoreWord32:
+ case kWord32AtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(str);
break;
- case kAtomicExchangeInt8:
+ case kWord32AtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexb, strexb);
__ sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kAtomicExchangeUint8:
+ case kWord32AtomicExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexb, strexb);
break;
- case kAtomicExchangeInt16:
+ case kWord32AtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexh, strexh);
__ sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kAtomicExchangeUint16:
+ case kWord32AtomicExchangeUint16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexh, strexh);
break;
- case kAtomicExchangeWord32:
+ case kWord32AtomicExchangeWord32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrex, strex);
break;
- case kAtomicCompareExchangeInt8:
+ case kWord32AtomicCompareExchangeInt8:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxtb(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb,
i.TempRegister(2));
__ sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kAtomicCompareExchangeUint8:
+ case kWord32AtomicCompareExchangeUint8:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxtb(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb,
i.TempRegister(2));
break;
- case kAtomicCompareExchangeInt16:
+ case kWord32AtomicCompareExchangeInt16:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxth(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh,
i.TempRegister(2));
__ sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kAtomicCompareExchangeUint16:
+ case kWord32AtomicCompareExchangeUint16:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxth(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh,
i.TempRegister(2));
break;
- case kAtomicCompareExchangeWord32:
+ case kWord32AtomicCompareExchangeWord32:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrex, strex,
i.InputRegister(2));
break;
#define ATOMIC_BINOP_CASE(op, inst) \
- case kAtomic##op##Int8: \
+ case kWord32Atomic##op##Int8: \
ASSEMBLE_ATOMIC_BINOP(ldrexb, strexb, inst); \
__ sxtb(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
- case kAtomic##op##Uint8: \
+ case kWord32Atomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP(ldrexb, strexb, inst); \
break; \
- case kAtomic##op##Int16: \
+ case kWord32Atomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP(ldrexh, strexh, inst); \
__ sxth(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
- case kAtomic##op##Uint16: \
+ case kWord32Atomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP(ldrexh, strexh, inst); \
break; \
- case kAtomic##op##Word32: \
+ case kWord32Atomic##op##Word32: \
ASSEMBLE_ATOMIC_BINOP(ldrex, strex, inst); \
break;
ATOMIC_BINOP_CASE(Add, add)
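For orientation, the narrow exchange and compare-exchange cases above pair a byte or halfword LL/SC sequence with a sign-extension of the result (the sxtb/sxth that follows). A minimal portable sketch of what kWord32AtomicExchangeInt8 computes, using std::atomic rather than ldrexb/strexb; the helper name and types are illustrative, not V8's:

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    // Exchange a byte atomically and return the previous value, sign-extended
    // to 32 bits -- the same result the ldrexb/strexb loop plus sxtb produces.
    int32_t AtomicExchangeInt8(std::atomic<uint8_t>* cell, int32_t new_value) {
      uint8_t old = cell->exchange(static_cast<uint8_t>(new_value));
      return static_cast<int32_t>(static_cast<int8_t>(old));
    }

    int main() {
      std::atomic<uint8_t> cell{0xF0};  // 0xF0 reads as -16 when sign-extended
      std::printf("previous value: %d\n", AtomicExchangeInt8(&cell, 1));
      return 0;
    }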
@@ -2607,6 +2714,20 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
}
+void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
+ Instruction* instr) {
+ // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
+ if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
+ return;
+ }
+
+ condition = NegateFlagsCondition(condition);
+ __ eor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ Operand(kSpeculationPoisonRegister), SBit::LeaveCC,
+ FlagsConditionToCondition(condition));
+ __ csdb();
+}
+
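The new AssembleBranchPoisoning clears the speculation-poison mask whenever the flags show the branch was entered along the wrong (mis-speculated) path, then issues a csdb barrier. A standalone sketch of that data flow, assuming a simplified model in place of the real flags machinery:

    #include <cstdint>
    #include <cstdio>

    // If the negated branch condition holds, we are on a mis-speculated path:
    // the mask collapses to zero (the conditional EOR of the register with
    // itself); otherwise it stays all-ones and loads pass through unchanged.
    uint64_t UpdatePoison(uint64_t poison, bool negated_condition_holds) {
      return negated_condition_holds ? uint64_t{0} : poison;
    }

    int main() {
      uint64_t on_path = UpdatePoison(~uint64_t{0}, false);   // predicted path
      uint64_t off_path = UpdatePoison(~uint64_t{0}, true);   // wrong path
      std::printf("on-path mask:  %016llx\n",
                  static_cast<unsigned long long>(on_path));
      std::printf("off-path mask: %016llx\n",
                  static_cast<unsigned long long>(off_path));
      return 0;
    }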
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -2654,8 +2775,9 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ isolate()),
0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
- CallDescriptor* descriptor = gen_->linkage()->GetIncomingDescriptor();
- int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
+ int pop_count =
+ static_cast<int>(call_descriptor->StackParameterCount());
__ Drop(pop_count);
__ Ret();
} else {
@@ -2725,9 +2847,9 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
void CodeGenerator::FinishFrame(Frame* frame) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
- const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+ const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
if (saves_fp != 0) {
frame->AlignSavedCalleeRegisterSlots();
}
@@ -2741,7 +2863,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
frame->AllocateSavedCalleeRegisterSlots((last - first + 1) *
(kDoubleSize / kPointerSize));
}
- const RegList saves = descriptor->CalleeSavedRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
// Save callee-saved registers.
frame->AllocateSavedCalleeRegisterSlots(base::bits::CountPopulation(saves));
@@ -2749,14 +2871,14 @@ void CodeGenerator::FinishFrame(Frame* frame) {
}
void CodeGenerator::AssembleConstructFrame() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
__ Push(lr, fp);
__ mov(fp, sp);
- } else if (descriptor->IsJSFunctionCall()) {
+ } else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
- if (descriptor->PushArgumentCount()) {
+ if (call_descriptor->PushArgumentCount()) {
__ Push(kJavaScriptCallArgCountRegister);
}
} else {
@@ -2766,8 +2888,8 @@ void CodeGenerator::AssembleConstructFrame() {
unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
}
- int shrink_slots =
- frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
+ int shrink_slots = frame()->GetTotalFrameSlotCount() -
+ call_descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -2780,10 +2902,11 @@ void CodeGenerator::AssembleConstructFrame() {
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
+ InitializePoisonForLoadsIfNeeded();
}
- const RegList saves = descriptor->CalleeSavedRegisters();
- const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
+ const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
if (shrink_slots > 0) {
if (info()->IsWasm()) {
@@ -2861,8 +2984,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+ int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
const int returns = frame()->GetReturnSlotCount();
if (returns != 0) {
@@ -2871,13 +2994,13 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
// Restore registers.
- const RegList saves = descriptor->CalleeSavedRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
__ ldm(ia_w, sp, saves);
}
// Restore FP registers.
- const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+ const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
if (saves_fp != 0) {
STATIC_ASSERT(DwVfpRegister::kNumRegisters == 32);
uint32_t last = base::bits::CountLeadingZeros32(saves_fp) - 1;
@@ -2889,7 +3012,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
unwinding_info_writer_.MarkBlockWillExit();
ArmOperandConverter g(this, nullptr);
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now unless they have a variable
@@ -2922,281 +3045,253 @@ void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
ArmOperandConverter g(this, nullptr);
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister()) {
- DCHECK(destination->IsRegister() || destination->IsStackSlot());
- Register src = g.ToRegister(source);
- if (destination->IsRegister()) {
- __ mov(g.ToRegister(destination), src);
- } else {
- __ str(src, g.ToMemOperand(destination));
- }
- } else if (source->IsStackSlot()) {
- DCHECK(destination->IsRegister() || destination->IsStackSlot());
- MemOperand src = g.ToMemOperand(source);
- if (destination->IsRegister()) {
- __ ldr(g.ToRegister(destination), src);
- } else {
- Register temp = kScratchReg;
- __ ldr(temp, src);
- __ str(temp, g.ToMemOperand(destination));
- }
- } else if (source->IsConstant()) {
- Constant src = g.ToConstant(source);
- if (destination->IsRegister() || destination->IsStackSlot()) {
- Register dst =
- destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
- switch (src.type()) {
- case Constant::kInt32:
- if (RelocInfo::IsWasmReference(src.rmode())) {
- __ mov(dst, Operand(src.ToInt32(), src.rmode()));
- } else {
- __ mov(dst, Operand(src.ToInt32()));
- }
- break;
- case Constant::kInt64:
- UNREACHABLE();
- break;
- case Constant::kFloat32:
- __ mov(dst, Operand::EmbeddedNumber(src.ToFloat32()));
- break;
- case Constant::kFloat64:
- __ mov(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
- break;
- case Constant::kExternalReference:
- __ mov(dst, Operand(src.ToExternalReference()));
- break;
- case Constant::kHeapObject: {
- Handle<HeapObject> src_object = src.ToHeapObject();
- Heap::RootListIndex index;
- if (IsMaterializableFromRoot(src_object, &index)) {
- __ LoadRoot(dst, index);
- } else {
- __ Move(dst, src_object);
- }
- break;
- }
- case Constant::kRpoNumber:
- UNREACHABLE(); // TODO(dcarney): loading RPO constants on arm.
- break;
- }
- if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
- } else if (src.type() == Constant::kFloat32) {
- if (destination->IsFloatStackSlot()) {
- MemOperand dst = g.ToMemOperand(destination);
- Register temp = kScratchReg;
- __ mov(temp, Operand(bit_cast<int32_t>(src.ToFloat32())));
- __ str(temp, dst);
+ // Helper function to write the given constant to the dst register.
+ auto MoveConstantToRegister = [&](Register dst, Constant src) {
+ if (src.type() == Constant::kHeapObject) {
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ Heap::RootListIndex index;
+ if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
} else {
- SwVfpRegister dst = g.ToFloatRegister(destination);
- __ vmov(dst, Float32::FromBits(src.ToFloat32AsInt()));
+ __ Move(dst, src_object);
}
} else {
- DCHECK_EQ(Constant::kFloat64, src.type());
- DwVfpRegister dst = destination->IsFPRegister()
- ? g.ToDoubleRegister(destination)
- : kScratchDoubleReg;
- __ vmov(dst, src.ToFloat64(), kScratchReg);
- if (destination->IsDoubleStackSlot()) {
- __ vstr(dst, g.ToMemOperand(destination));
- }
+ __ mov(dst, g.ToImmediate(source));
}
- } else if (source->IsFPRegister()) {
- MachineRepresentation rep = LocationOperand::cast(source)->representation();
- if (rep == MachineRepresentation::kFloat64) {
- DwVfpRegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
- DwVfpRegister dst = g.ToDoubleRegister(destination);
- __ Move(dst, src);
- } else {
- DCHECK(destination->IsDoubleStackSlot());
- __ vstr(src, g.ToMemOperand(destination));
- }
- } else if (rep == MachineRepresentation::kFloat32) {
- // GapResolver may give us reg codes that don't map to actual s-registers.
- // Generate code to work around those cases.
- int src_code = LocationOperand::cast(source)->register_code();
- if (destination->IsFloatRegister()) {
+ };
+ switch (MoveType::InferMove(source, destination)) {
+ case MoveType::kRegisterToRegister:
+ if (source->IsRegister()) {
+ __ mov(g.ToRegister(destination), g.ToRegister(source));
+ } else if (source->IsFloatRegister()) {
+ DCHECK(destination->IsFloatRegister());
+ // GapResolver may give us reg codes that don't map to actual
+ // s-registers. Generate code to work around those cases.
+ int src_code = LocationOperand::cast(source)->register_code();
int dst_code = LocationOperand::cast(destination)->register_code();
__ VmovExtended(dst_code, src_code);
+ } else if (source->IsDoubleRegister()) {
+ __ Move(g.ToDoubleRegister(destination), g.ToDoubleRegister(source));
} else {
- DCHECK(destination->IsFloatStackSlot());
- __ VmovExtended(g.ToMemOperand(destination), src_code);
+ __ Move(g.ToSimd128Register(destination), g.ToSimd128Register(source));
}
- } else {
- DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- QwNeonRegister src = g.ToSimd128Register(source);
- if (destination->IsSimd128Register()) {
- QwNeonRegister dst = g.ToSimd128Register(destination);
- __ Move(dst, src);
+ return;
+ case MoveType::kRegisterToStack: {
+ MemOperand dst = g.ToMemOperand(destination);
+ if (source->IsRegister()) {
+ __ str(g.ToRegister(source), dst);
+ } else if (source->IsFloatRegister()) {
+ // GapResolver may give us reg codes that don't map to actual
+ // s-registers. Generate code to work around those cases.
+ int src_code = LocationOperand::cast(source)->register_code();
+ __ VmovExtended(dst, src_code);
+ } else if (source->IsDoubleRegister()) {
+ __ vstr(g.ToDoubleRegister(source), dst);
} else {
- DCHECK(destination->IsSimd128StackSlot());
- MemOperand dst = g.ToMemOperand(destination);
- __ add(kScratchReg, dst.rn(), Operand(dst.offset()));
- __ vst1(Neon8, NeonListOperand(src.low(), 2),
- NeonMemOperand(kScratchReg));
+ UseScratchRegisterScope temps(tasm());
+ Register temp = temps.Acquire();
+ QwNeonRegister src = g.ToSimd128Register(source);
+ __ add(temp, dst.rn(), Operand(dst.offset()));
+ __ vst1(Neon8, NeonListOperand(src.low(), 2), NeonMemOperand(temp));
}
- }
- } else if (source->IsFPStackSlot()) {
- MemOperand src = g.ToMemOperand(source);
- MachineRepresentation rep =
- LocationOperand::cast(destination)->representation();
- if (destination->IsFPRegister()) {
- if (rep == MachineRepresentation::kFloat64) {
- __ vldr(g.ToDoubleRegister(destination), src);
- } else if (rep == MachineRepresentation::kFloat32) {
+ return;
+ }
+ case MoveType::kStackToRegister: {
+ MemOperand src = g.ToMemOperand(source);
+ if (source->IsStackSlot()) {
+ __ ldr(g.ToRegister(destination), src);
+ } else if (source->IsFloatStackSlot()) {
+ DCHECK(destination->IsFloatRegister());
// GapResolver may give us reg codes that don't map to actual
// s-registers. Generate code to work around those cases.
int dst_code = LocationOperand::cast(destination)->register_code();
__ VmovExtended(dst_code, src);
+ } else if (source->IsDoubleStackSlot()) {
+ __ vldr(g.ToDoubleRegister(destination), src);
} else {
- DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ UseScratchRegisterScope temps(tasm());
+ Register temp = temps.Acquire();
QwNeonRegister dst = g.ToSimd128Register(destination);
- __ add(kScratchReg, src.rn(), Operand(src.offset()));
- __ vld1(Neon8, NeonListOperand(dst.low(), 2),
- NeonMemOperand(kScratchReg));
+ __ add(temp, src.rn(), Operand(src.offset()));
+ __ vld1(Neon8, NeonListOperand(dst.low(), 2), NeonMemOperand(temp));
}
- } else {
- DCHECK(destination->IsFPStackSlot());
- if (rep == MachineRepresentation::kFloat64) {
- DwVfpRegister temp = kScratchDoubleReg;
+ return;
+ }
+ case MoveType::kStackToStack: {
+ MemOperand src = g.ToMemOperand(source);
+ MemOperand dst = g.ToMemOperand(destination);
+ UseScratchRegisterScope temps(tasm());
+ if (source->IsStackSlot() || source->IsFloatStackSlot()) {
+ SwVfpRegister temp = temps.AcquireS();
__ vldr(temp, src);
- __ vstr(temp, g.ToMemOperand(destination));
- } else if (rep == MachineRepresentation::kFloat32) {
- SwVfpRegister temp = kScratchDoubleReg.low();
+ __ vstr(temp, dst);
+ } else if (source->IsDoubleStackSlot()) {
+ DwVfpRegister temp = temps.AcquireD();
__ vldr(temp, src);
- __ vstr(temp, g.ToMemOperand(destination));
+ __ vstr(temp, dst);
+ } else {
+ DCHECK(source->IsSimd128StackSlot());
+ Register temp = temps.Acquire();
+ QwNeonRegister temp_q = temps.AcquireQ();
+ __ add(temp, src.rn(), Operand(src.offset()));
+ __ vld1(Neon8, NeonListOperand(temp_q.low(), 2), NeonMemOperand(temp));
+ __ add(temp, dst.rn(), Operand(dst.offset()));
+ __ vst1(Neon8, NeonListOperand(temp_q.low(), 2), NeonMemOperand(temp));
+ }
+ return;
+ }
+ case MoveType::kConstantToRegister: {
+ Constant src = g.ToConstant(source);
+ if (destination->IsRegister()) {
+ MoveConstantToRegister(g.ToRegister(destination), src);
+ } else if (destination->IsFloatRegister()) {
+ __ vmov(g.ToFloatRegister(destination),
+ Float32::FromBits(src.ToFloat32AsInt()));
} else {
- DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- MemOperand dst = g.ToMemOperand(destination);
- __ add(kScratchReg, src.rn(), Operand(src.offset()));
- __ vld1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
- NeonMemOperand(kScratchReg));
- __ add(kScratchReg, dst.rn(), Operand(dst.offset()));
- __ vst1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
- NeonMemOperand(kScratchReg));
+ // TODO(arm): Look into optimizing this further if possible. Supporting
+ // the NEON version of VMOV may help.
+ __ vmov(g.ToDoubleRegister(destination), src.ToFloat64());
}
+ return;
+ }
+ case MoveType::kConstantToStack: {
+ Constant src = g.ToConstant(source);
+ MemOperand dst = g.ToMemOperand(destination);
+ if (destination->IsStackSlot()) {
+ UseScratchRegisterScope temps(tasm());
+        // Acquire an S register instead of a general purpose register in case
+ // `vstr` needs one to compute the address of `dst`.
+ SwVfpRegister s_temp = temps.AcquireS();
+ {
+ // TODO(arm): This sequence could be optimized further if necessary by
+ // writing the constant directly into `s_temp`.
+ UseScratchRegisterScope temps(tasm());
+ Register temp = temps.Acquire();
+ MoveConstantToRegister(temp, src);
+ __ vmov(s_temp, temp);
+ }
+ __ vstr(s_temp, dst);
+ } else if (destination->IsFloatStackSlot()) {
+ UseScratchRegisterScope temps(tasm());
+ SwVfpRegister temp = temps.AcquireS();
+ __ vmov(temp, Float32::FromBits(src.ToFloat32AsInt()));
+ __ vstr(temp, dst);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ UseScratchRegisterScope temps(tasm());
+ DwVfpRegister temp = temps.AcquireD();
+ // TODO(arm): Look into optimizing this further if possible. Supporting
+ // the NEON version of VMOV may help.
+ __ vmov(temp, src.ToFloat64());
+ __ vstr(temp, g.ToMemOperand(destination));
+ }
+ return;
}
- } else {
- UNREACHABLE();
}
+ UNREACHABLE();
}
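The rewritten AssembleMove classifies each (source, destination) pair once via MoveType::InferMove and then switches on that single value, instead of nesting checks per operand kind. A compilable sketch of that shape, with made-up operand kinds standing in for the real InstructionOperand queries:

    #include <cstdio>

    enum class Kind { kRegister, kStackSlot, kConstant };
    enum class MoveKind {
      kRegisterToRegister, kRegisterToStack, kStackToRegister,
      kStackToStack, kConstantToRegister, kConstantToStack
    };

    // Classify the pair once; the caller then handles each kind in one switch.
    MoveKind InferMove(Kind src, Kind dst) {
      if (src == Kind::kConstant) {
        return dst == Kind::kRegister ? MoveKind::kConstantToRegister
                                      : MoveKind::kConstantToStack;
      }
      if (src == Kind::kRegister) {
        return dst == Kind::kRegister ? MoveKind::kRegisterToRegister
                                      : MoveKind::kRegisterToStack;
      }
      return dst == Kind::kRegister ? MoveKind::kStackToRegister
                                    : MoveKind::kStackToStack;
    }

    int main() {
      switch (InferMove(Kind::kConstant, Kind::kStackSlot)) {
        case MoveKind::kConstantToStack: std::puts("constant -> stack"); break;
        default:                         std::puts("other move kind");   break;
      }
      return 0;
    }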
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
ArmOperandConverter g(this, nullptr);
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister()) {
- // Register-register.
- Register temp = kScratchReg;
- Register src = g.ToRegister(source);
- if (destination->IsRegister()) {
- Register dst = g.ToRegister(destination);
- __ Move(temp, src);
- __ Move(src, dst);
- __ Move(dst, temp);
- } else {
- DCHECK(destination->IsStackSlot());
- MemOperand dst = g.ToMemOperand(destination);
- __ mov(temp, src);
- __ ldr(src, dst);
- __ str(temp, dst);
- }
- } else if (source->IsStackSlot()) {
- DCHECK(destination->IsStackSlot());
- Register temp_0 = kScratchReg;
- SwVfpRegister temp_1 = kScratchDoubleReg.low();
- MemOperand src = g.ToMemOperand(source);
- MemOperand dst = g.ToMemOperand(destination);
- __ ldr(temp_0, src);
- __ vldr(temp_1, dst);
- __ str(temp_0, dst);
- __ vstr(temp_1, src);
- } else if (source->IsFPRegister()) {
- MachineRepresentation rep = LocationOperand::cast(source)->representation();
- LowDwVfpRegister temp = kScratchDoubleReg;
- if (rep == MachineRepresentation::kFloat64) {
- DwVfpRegister src = g.ToDoubleRegister(source);
- if (destination->IsFPRegister()) {
- DwVfpRegister dst = g.ToDoubleRegister(destination);
- __ Swap(src, dst);
- } else {
- DCHECK(destination->IsFPStackSlot());
- MemOperand dst = g.ToMemOperand(destination);
- __ Move(temp, src);
- __ vldr(src, dst);
- __ vstr(temp, dst);
- }
- } else if (rep == MachineRepresentation::kFloat32) {
- int src_code = LocationOperand::cast(source)->register_code();
- if (destination->IsFPRegister()) {
+ switch (MoveType::InferSwap(source, destination)) {
+ case MoveType::kRegisterToRegister:
+ if (source->IsRegister()) {
+ __ Swap(g.ToRegister(source), g.ToRegister(destination));
+ } else if (source->IsFloatRegister()) {
+ DCHECK(destination->IsFloatRegister());
+ // GapResolver may give us reg codes that don't map to actual
+ // s-registers. Generate code to work around those cases.
+ UseScratchRegisterScope temps(tasm());
+ LowDwVfpRegister temp = temps.AcquireLowD();
+ int src_code = LocationOperand::cast(source)->register_code();
int dst_code = LocationOperand::cast(destination)->register_code();
__ VmovExtended(temp.low().code(), src_code);
__ VmovExtended(src_code, dst_code);
__ VmovExtended(dst_code, temp.low().code());
+ } else if (source->IsDoubleRegister()) {
+ __ Swap(g.ToDoubleRegister(source), g.ToDoubleRegister(destination));
} else {
- DCHECK(destination->IsFPStackSlot());
- MemOperand dst = g.ToMemOperand(destination);
+ __ Swap(g.ToSimd128Register(source), g.ToSimd128Register(destination));
+ }
+ return;
+ case MoveType::kRegisterToStack: {
+ MemOperand dst = g.ToMemOperand(destination);
+ if (source->IsRegister()) {
+ Register src = g.ToRegister(source);
+ UseScratchRegisterScope temps(tasm());
+ SwVfpRegister temp = temps.AcquireS();
+ __ vmov(temp, src);
+ __ ldr(src, dst);
+ __ vstr(temp, dst);
+ } else if (source->IsFloatRegister()) {
+ int src_code = LocationOperand::cast(source)->register_code();
+ UseScratchRegisterScope temps(tasm());
+ LowDwVfpRegister temp = temps.AcquireLowD();
__ VmovExtended(temp.low().code(), src_code);
__ VmovExtended(src_code, dst);
__ vstr(temp.low(), dst);
- }
- } else {
- DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- QwNeonRegister src = g.ToSimd128Register(source);
- if (destination->IsFPRegister()) {
- QwNeonRegister dst = g.ToSimd128Register(destination);
- __ Swap(src, dst);
+ } else if (source->IsDoubleRegister()) {
+ UseScratchRegisterScope temps(tasm());
+ DwVfpRegister temp = temps.AcquireD();
+ DwVfpRegister src = g.ToDoubleRegister(source);
+ __ Move(temp, src);
+ __ vldr(src, dst);
+ __ vstr(temp, dst);
} else {
- DCHECK(destination->IsFPStackSlot());
- MemOperand dst = g.ToMemOperand(destination);
- __ Move(kScratchQuadReg, src);
- __ add(kScratchReg, dst.rn(), Operand(dst.offset()));
- __ vld1(Neon8, NeonListOperand(src.low(), 2),
- NeonMemOperand(kScratchReg));
- __ vst1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
- NeonMemOperand(kScratchReg));
+ QwNeonRegister src = g.ToSimd128Register(source);
+ UseScratchRegisterScope temps(tasm());
+ Register temp = temps.Acquire();
+ QwNeonRegister temp_q = temps.AcquireQ();
+ __ Move(temp_q, src);
+ __ add(temp, dst.rn(), Operand(dst.offset()));
+ __ vld1(Neon8, NeonListOperand(src.low(), 2), NeonMemOperand(temp));
+ __ vst1(Neon8, NeonListOperand(temp_q.low(), 2), NeonMemOperand(temp));
}
+ return;
}
- } else if (source->IsFPStackSlot()) {
- DCHECK(destination->IsFPStackSlot());
- Register temp_0 = kScratchReg;
- LowDwVfpRegister temp_1 = kScratchDoubleReg;
- MemOperand src0 = g.ToMemOperand(source);
- MemOperand dst0 = g.ToMemOperand(destination);
- MachineRepresentation rep = LocationOperand::cast(source)->representation();
- if (rep == MachineRepresentation::kFloat64) {
- MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
- MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
- __ vldr(temp_1, dst0); // Save destination in temp_1.
- __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
- __ str(temp_0, dst0);
- __ ldr(temp_0, src1);
- __ str(temp_0, dst1);
- __ vstr(temp_1, src0);
- } else if (rep == MachineRepresentation::kFloat32) {
- __ vldr(temp_1.low(), dst0); // Save destination in temp_1.
- __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
- __ str(temp_0, dst0);
- __ vstr(temp_1.low(), src0);
- } else {
- DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- MemOperand src1(src0.rn(), src0.offset() + kDoubleSize);
- MemOperand dst1(dst0.rn(), dst0.offset() + kDoubleSize);
- __ vldr(kScratchQuadReg.low(), dst0);
- __ vldr(kScratchQuadReg.high(), src0);
- __ vstr(kScratchQuadReg.low(), src0);
- __ vstr(kScratchQuadReg.high(), dst0);
- __ vldr(kScratchQuadReg.low(), dst1);
- __ vldr(kScratchQuadReg.high(), src1);
- __ vstr(kScratchQuadReg.low(), src1);
- __ vstr(kScratchQuadReg.high(), dst1);
+ case MoveType::kStackToStack: {
+ MemOperand src = g.ToMemOperand(source);
+ MemOperand dst = g.ToMemOperand(destination);
+ if (source->IsStackSlot() || source->IsFloatStackSlot()) {
+ UseScratchRegisterScope temps(tasm());
+ SwVfpRegister temp_0 = temps.AcquireS();
+ SwVfpRegister temp_1 = temps.AcquireS();
+ __ vldr(temp_0, dst);
+ __ vldr(temp_1, src);
+ __ vstr(temp_0, src);
+ __ vstr(temp_1, dst);
+ } else if (source->IsDoubleStackSlot()) {
+ UseScratchRegisterScope temps(tasm());
+ DwVfpRegister temp_0 = temps.AcquireD();
+ DwVfpRegister temp_1 = temps.AcquireD();
+ __ vldr(temp_0, dst);
+ __ vldr(temp_1, src);
+ __ vstr(temp_0, src);
+ __ vstr(temp_1, dst);
+ } else {
+ DCHECK(source->IsSimd128StackSlot());
+ MemOperand src0 = src;
+ MemOperand dst0 = dst;
+ MemOperand src1(src.rn(), src.offset() + kDoubleSize);
+ MemOperand dst1(dst.rn(), dst.offset() + kDoubleSize);
+ UseScratchRegisterScope temps(tasm());
+ DwVfpRegister temp_0 = temps.AcquireD();
+ DwVfpRegister temp_1 = temps.AcquireD();
+ __ vldr(temp_0, dst0);
+ __ vldr(temp_1, src0);
+ __ vstr(temp_0, src0);
+ __ vstr(temp_1, dst0);
+ __ vldr(temp_0, dst1);
+ __ vldr(temp_1, src1);
+ __ vstr(temp_0, src1);
+ __ vstr(temp_1, dst1);
+ }
+ return;
}
- } else {
- // No other combinations are possible.
- UNREACHABLE();
+ default:
+ UNREACHABLE();
+ break;
}
}
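Both AssembleMove and AssembleSwap now draw temporaries from UseScratchRegisterScope instead of the fixed kScratchReg/kScratchDoubleReg aliases. A rough model of that scoped-pool pattern, assuming a simplified register pool rather than the actual TurboAssembler interface:

    #include <bitset>
    #include <cassert>
    #include <vector>

    class ScratchPool {
     public:
      int Acquire() {
        for (int i = 0; i < 32; ++i) {
          if (!in_use_[i]) { in_use_.set(i); return i; }
        }
        assert(false && "no scratch register available");
        return -1;
      }
      void Release(int code) { in_use_.reset(code); }
     private:
      std::bitset<32> in_use_;
    };

    // Registers acquired inside a scope are handed back when it ends, so a
    // swap can take two temporaries without permanently reserving them.
    class UseScratchScope {
     public:
      explicit UseScratchScope(ScratchPool* pool) : pool_(pool) {}
      ~UseScratchScope() { for (int r : acquired_) pool_->Release(r); }
      int Acquire() {
        int r = pool_->Acquire();
        acquired_.push_back(r);
        return r;
      }
     private:
      ScratchPool* pool_;
      std::vector<int> acquired_;
    };

    int main() {
      ScratchPool pool;
      {
        UseScratchScope temps(&pool);
        int t0 = temps.Acquire();  // e.g. holds one half of a stack-to-stack swap
        int t1 = temps.Acquire();  // holds the other half
        (void)t0; (void)t1;
      }  // both scratch registers are released here
      return 0;
    }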
@@ -3206,7 +3301,6 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
}
#undef __
-#undef kScratchReg
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/arm/instruction-codes-arm.h b/deps/v8/src/compiler/arm/instruction-codes-arm.h
index a7cf80450a..a9f9be38ef 100644
--- a/deps/v8/src/compiler/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/arm/instruction-codes-arm.h
@@ -125,6 +125,7 @@ namespace compiler {
V(ArmPush) \
V(ArmPoke) \
V(ArmPeek) \
+ V(ArmDsbIsb) \
V(ArmF32x4Splat) \
V(ArmF32x4ExtractLane) \
V(ArmF32x4ReplaceLane) \
diff --git a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
index a592515179..e538020f69 100644
--- a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
@@ -274,6 +274,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmStr:
case kArmPush:
case kArmPoke:
+ case kArmDsbIsb:
return kHasSideEffect;
#define CASE(Name) case k##Name:
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index f94d114d07..ef81c98716 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -460,11 +460,17 @@ void InstructionSelector::VisitLoad(Node* node) {
UNREACHABLE();
return;
}
+ if (node->opcode() == IrOpcode::kPoisonedLoad) {
+ CHECK_EQ(load_poisoning_, LoadPoisoning::kDoPoison);
+ opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ }
InstructionOperand output = g.DefineAsRegister(node);
EmitLoad(this, opcode, &output, base, index);
}
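The poisoned-load path only ORs an extra field into the instruction code, so the code generator can later decode kMemoryAccessPoisoned from the same word. A sketch of that bit-field encoding style; the field width and position below are invented for illustration and do not claim to match V8's layout:

    #include <cstdint>

    template <typename T, int kShift, int kSize>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << kSize) - 1u) << kShift;
      static constexpr uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << kShift;
      }
      static constexpr T decode(uint32_t word) {
        return static_cast<T>((word & kMask) >> kShift);
      }
    };

    enum MemoryAccessMode { kMemoryAccessDirect = 0, kMemoryAccessPoisoned = 1 };
    using MiscField = BitField<MemoryAccessMode, 22, 10>;  // illustrative position

    int main() {
      uint32_t opcode = 0x17;                              // some load opcode
      opcode |= MiscField::encode(kMemoryAccessPoisoned);  // mark it poisoned
      return MiscField::decode(opcode) == kMemoryAccessPoisoned ? 0 : 1;
    }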
+void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -1121,7 +1127,10 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitSpeculationFence(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmDsbIsb, g.NoOutput());
+}
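VisitSpeculationFence now emits kArmDsbIsb, which (as the arm64 code generator further down shows) lowers to a full data synchronization barrier followed by an instruction synchronization barrier. A hedged standalone helper with the equivalent two-instruction sequence in GCC/Clang inline assembly, only meaningful when compiled for ARM targets:

    // Sketch of the barrier pair behind kArmDsbIsb / kArm64DsbIsb.
    inline void SpeculationFence() {
    #if defined(__arm__) || defined(__aarch64__)
      __asm__ __volatile__("dsb sy\n\tisb" ::: "memory");
    #else
      // Non-ARM builds: nothing to emit in this sketch.
    #endif
    }

    int main() {
      SpeculationFence();
      return 0;
    }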
void InstructionSelector::VisitInt32Add(Node* node) {
ArmOperandGenerator g(this);
@@ -1157,6 +1166,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
g.UseRegister(mleft.left().node()), g.TempImmediate(0));
return;
}
+ break;
}
case IrOpcode::kWord32Sar: {
Int32BinopMatcher mleft(m.left().node());
@@ -1175,6 +1185,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
return;
}
}
+ break;
}
default:
break;
@@ -1211,6 +1222,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
g.UseRegister(mright.left().node()), g.TempImmediate(0));
return;
}
+ break;
}
case IrOpcode::kWord32Sar: {
Int32BinopMatcher mright(m.right().node());
@@ -1229,6 +1241,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
return;
}
}
+ break;
}
default:
break;
@@ -1395,6 +1408,7 @@ void InstructionSelector::VisitUint32Mod(Node* node) {
}
RR_OP_LIST(RR_VISITOR)
#undef RR_VISITOR
+#undef RR_OP_LIST
#define RR_VISITOR_V8(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -1403,6 +1417,7 @@ RR_OP_LIST(RR_VISITOR)
}
RR_OP_LIST_V8(RR_VISITOR_V8)
#undef RR_VISITOR_V8
+#undef RR_OP_LIST_V8
#define RRR_VISITOR(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -1410,6 +1425,7 @@ RR_OP_LIST_V8(RR_VISITOR_V8)
}
RRR_OP_LIST(RRR_VISITOR)
#undef RRR_VISITOR
+#undef RRR_OP_LIST
void InstructionSelector::VisitFloat32Add(Node* node) {
ArmOperandGenerator g(this);
@@ -1500,14 +1516,14 @@ void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
}
void InstructionSelector::EmitPrepareArguments(
- ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
Node* node) {
ArmOperandGenerator g(this);
// Prepare for C function call.
- if (descriptor->IsCFunctionCall()) {
- Emit(kArchPrepareCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
+ if (call_descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
+ call_descriptor->ParameterCount())),
0, nullptr, 0, nullptr);
// Poke any stack arguments.
@@ -1529,9 +1545,9 @@ void InstructionSelector::EmitPrepareArguments(
}
}
-void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
- const CallDescriptor* descriptor,
- Node* node) {
+void InstructionSelector::EmitPrepareResults(
+ ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
+ Node* node) {
ArmOperandGenerator g(this);
int reverse_slot = 0;
@@ -1539,7 +1555,7 @@ void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
if (!output.location.IsCallerFrameSlot()) continue;
// Skip any alignment holes in nodes.
if (output.node != nullptr) {
- DCHECK(!descriptor->IsCFunctionCall());
+ DCHECK(!call_descriptor->IsCFunctionCall());
if (output.location.GetType() == MachineType::Float32()) {
MarkAsFloat32(output.node);
} else if (output.location.GetType() == MachineType::Float64()) {
@@ -1806,13 +1822,13 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
VisitWordCompare(selector, node, opcode, cont);
}
+} // namespace
// Shared routine for word comparisons against zero.
-void VisitWordCompareZero(InstructionSelector* selector, Node* user,
- Node* value, FlagsContinuation* cont) {
+void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
+ FlagsContinuation* cont) {
// Try to combine with comparisons against 0 by simply inverting the branch.
- while (value->opcode() == IrOpcode::kWord32Equal &&
- selector->CanCover(user, value)) {
+ while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
Int32BinopMatcher m(value);
if (!m.right().Is(0)) break;
@@ -1821,41 +1837,41 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
cont->Negate();
}
- if (selector->CanCover(user, value)) {
+ if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kWord32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kInt32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kUint32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kUint32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kFloat32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThan:
cont->OverwriteAndNegateIfEqual(kFloatLessThan);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat64Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThan:
cont->OverwriteAndNegateIfEqual(kFloatLessThan);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
@@ -1867,21 +1883,21 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (!result || selector->IsDefined(result)) {
+ if (!result || IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kArmAdd, kArmAdd, cont);
+ return VisitBinop(this, node, kArmAdd, kArmAdd, cont);
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kArmSub, kArmRsb, cont);
+ return VisitBinop(this, node, kArmSub, kArmRsb, cont);
case IrOpcode::kInt32MulWithOverflow:
// ARM doesn't set the overflow flag for multiplication, so we
// need to test on kNotEqual. Here is the code sequence used:
// smull resultlow, resulthigh, left, right
// cmp resulthigh, Operand(resultlow, ASR, 31)
cont->OverwriteAndNegateIfEqual(kNotEqual);
- return EmitInt32MulWithOverflow(selector, node, cont);
+ return EmitInt32MulWithOverflow(this, node, cont);
default:
break;
}
@@ -1889,112 +1905,79 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
}
break;
case IrOpcode::kInt32Add:
- return VisitWordCompare(selector, value, kArmCmn, cont);
+ return VisitWordCompare(this, value, kArmCmn, cont);
case IrOpcode::kInt32Sub:
- return VisitWordCompare(selector, value, kArmCmp, cont);
+ return VisitWordCompare(this, value, kArmCmp, cont);
case IrOpcode::kWord32And:
- return VisitWordCompare(selector, value, kArmTst, cont);
+ return VisitWordCompare(this, value, kArmTst, cont);
case IrOpcode::kWord32Or:
- return VisitBinop(selector, value, kArmOrr, kArmOrr, cont);
+ return VisitBinop(this, value, kArmOrr, kArmOrr, cont);
case IrOpcode::kWord32Xor:
- return VisitWordCompare(selector, value, kArmTeq, cont);
+ return VisitWordCompare(this, value, kArmTeq, cont);
case IrOpcode::kWord32Sar:
- return VisitShift(selector, value, TryMatchASR, cont);
+ return VisitShift(this, value, TryMatchASR, cont);
case IrOpcode::kWord32Shl:
- return VisitShift(selector, value, TryMatchLSL, cont);
+ return VisitShift(this, value, TryMatchLSL, cont);
case IrOpcode::kWord32Shr:
- return VisitShift(selector, value, TryMatchLSR, cont);
+ return VisitShift(this, value, TryMatchLSR, cont);
case IrOpcode::kWord32Ror:
- return VisitShift(selector, value, TryMatchROR, cont);
+ return VisitShift(this, value, TryMatchROR, cont);
default:
break;
}
}
if (user->opcode() == IrOpcode::kWord32Equal) {
- return VisitWordCompare(selector, user, cont);
+ return VisitWordCompare(this, user, cont);
}
// Continuation could not be combined with a compare, emit compare against 0.
- ArmOperandGenerator g(selector);
+ ArmOperandGenerator g(this);
InstructionCode const opcode =
cont->Encode(kArmTst) | AddressingModeField::encode(kMode_Operand2_R);
InstructionOperand const value_operand = g.UseRegister(value);
if (cont->IsBranch()) {
- selector->Emit(opcode, g.NoOutput(), value_operand, value_operand,
- g.Label(cont->true_block()), g.Label(cont->false_block()));
+ Emit(opcode, g.NoOutput(), value_operand, value_operand,
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand, value_operand,
- cont->kind(), cont->reason(), cont->feedback(),
- cont->frame_state());
+ EmitDeoptimize(opcode, g.NoOutput(), value_operand, value_operand,
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
- selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
- value_operand);
+ Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
+ value_operand);
} else {
DCHECK(cont->IsTrap());
- selector->Emit(opcode, g.NoOutput(), value_operand, value_operand,
- g.UseImmediate(cont->trap_id()));
+ Emit(opcode, g.NoOutput(), value_operand, value_operand,
+ g.UseImmediate(cont->trap_id()));
}
}
-} // namespace
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
- BasicBlock* fbranch) {
- FlagsContinuation cont(kNotEqual, tbranch, fbranch);
- VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapUnless(Node* node,
- Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
ArmOperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
- static const size_t kMaxTableSwitchValueRange = 2 << 16;
- size_t table_space_cost = 4 + sw.value_range;
- size_t table_time_cost = 3;
- size_t lookup_space_cost = 3 + 2 * sw.case_count;
- size_t lookup_time_cost = sw.case_count;
- if (sw.case_count > 0 &&
- table_space_cost + 3 * table_time_cost <=
- lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min() &&
- sw.value_range <= kMaxTableSwitchValueRange) {
- InstructionOperand index_operand = value_operand;
- if (sw.min_value) {
- index_operand = g.TempRegister();
- Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_I),
- index_operand, value_operand, g.TempImmediate(sw.min_value));
+ if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
+ size_t table_space_cost = 4 + sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 3 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value) {
+ index_operand = g.TempRegister();
+ Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_I),
+ index_operand, value_operand, g.TempImmediate(sw.min_value));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
}
- // Generate a table lookup.
- return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
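As a worked example of the cost heuristic above: with case_count = 8 and value_range = 10, the table costs 4 + 10 = 14 slots of space and 3 units of time (14 + 3*3 = 23), while the lookup sequence costs 3 + 2*8 = 19 slots and 8 units (19 + 3*8 = 43). Since 23 <= 43, and assuming min_value and the 2 << 16 range limit are satisfied, the selector emits a table switch; otherwise it falls through to the conditional-jump sequence below.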
@@ -2006,7 +1989,7 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int32BinopMatcher m(node);
if (m.right().Is(0)) {
- return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
+ return VisitWordCompareZero(m.node(), m.left().node(), &cont);
}
VisitWordCompare(this, node, &cont);
}
@@ -2137,7 +2120,7 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(right));
}
-void InstructionSelector::VisitAtomicLoad(Node* node) {
+void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
ArmOperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -2145,13 +2128,15 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
ArchOpcode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ opcode =
+ load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
+ : kWord32AtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicLoadWord32;
+ opcode = kWord32AtomicLoadWord32;
break;
default:
UNREACHABLE();
@@ -2161,7 +2146,7 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
}
-void InstructionSelector::VisitAtomicStore(Node* node) {
+void InstructionSelector::VisitWord32AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
ArmOperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -2170,13 +2155,13 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kAtomicStoreWord8;
+ opcode = kWord32AtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
- opcode = kAtomicStoreWord16;
+ opcode = kWord32AtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicStoreWord32;
+ opcode = kWord32AtomicStoreWord32;
break;
default:
UNREACHABLE();
@@ -2193,7 +2178,7 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
Emit(code, 0, nullptr, input_count, inputs);
}
-void InstructionSelector::VisitAtomicExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArmOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2201,15 +2186,15 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpRepresentationOf(node->op());
if (type == MachineType::Int8()) {
- opcode = kAtomicExchangeInt8;
+ opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicExchangeUint8;
+ opcode = kWord32AtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicExchangeInt16;
+ opcode = kWord32AtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicExchangeUint16;
+ opcode = kWord32AtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicExchangeWord32;
+ opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -2228,7 +2213,7 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
}
-void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
ArmOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2237,15 +2222,15 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpRepresentationOf(node->op());
if (type == MachineType::Int8()) {
- opcode = kAtomicCompareExchangeInt8;
+ opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicCompareExchangeUint8;
+ opcode = kWord32AtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicCompareExchangeInt16;
+ opcode = kWord32AtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicCompareExchangeUint16;
+ opcode = kWord32AtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicCompareExchangeWord32;
+ opcode = kWord32AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -2304,11 +2289,12 @@ void InstructionSelector::VisitAtomicBinaryOperation(
Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitAtomic##op(Node* node) { \
- VisitAtomicBinaryOperation(node, kAtomic##op##Int8, kAtomic##op##Uint8, \
- kAtomic##op##Int16, kAtomic##op##Uint16, \
- kAtomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitAtomicBinaryOperation( \
+ node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
+ kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
+ kWord32Atomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2323,11 +2309,6 @@ VISIT_ATOMIC_BINOP(Xor)
V(I16x8) \
V(I8x16)
-#define SIMD_FORMAT_LIST(V) \
- V(32x4, 4) \
- V(16x8, 8) \
- V(8x16, 16)
-
#define SIMD_UNOP_LIST(V) \
V(F32x4SConvertI32x4, kArmF32x4SConvertI32x4) \
V(F32x4UConvertI32x4, kArmF32x4UConvertI32x4) \
@@ -2460,6 +2441,7 @@ SIMD_TYPE_LIST(SIMD_VISIT_EXTRACT_LANE)
}
SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
#undef SIMD_VISIT_REPLACE_LANE
+#undef SIMD_TYPE_LIST
#define SIMD_VISIT_UNOP(Name, instruction) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -2467,6 +2449,7 @@ SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
}
SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
#undef SIMD_VISIT_UNOP
+#undef SIMD_UNOP_LIST
#define SIMD_VISIT_SHIFT_OP(Name) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -2474,6 +2457,7 @@ SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
}
SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
#undef SIMD_VISIT_SHIFT_OP
+#undef SIMD_SHIFT_OP_LIST
#define SIMD_VISIT_BINOP(Name, instruction) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -2481,6 +2465,7 @@ SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
}
SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
+#undef SIMD_BINOP_LIST
void InstructionSelector::VisitS128Select(Node* node) {
ArmOperandGenerator g(this);
@@ -2631,6 +2616,18 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
g.UseImmediate(Pack4Lanes(shuffle + 12, mask)));
}
+void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmSxtb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(0));
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmSxth, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(0));
+}
+
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
}
@@ -2642,7 +2639,8 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- MachineOperatorBuilder::Flags flags;
+ MachineOperatorBuilder::Flags flags =
+ MachineOperatorBuilder::kSpeculationFence;
if (CpuFeatures::IsSupported(SUDIV)) {
// The sdiv and udiv instructions correctly return 0 if the divisor is 0,
// but the fall-back implementation does not.
@@ -2676,6 +2674,9 @@ InstructionSelector::AlignmentRequirements() {
SomeUnalignedAccessUnsupported(req_aligned, req_aligned);
}
+// static
+bool InstructionSelector::SupportsSpeculationPoisoning() { return true; }
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/arm/unwinding-info-writer-arm.h b/deps/v8/src/compiler/arm/unwinding-info-writer-arm.h
index d47ca083ae..a741121e32 100644
--- a/deps/v8/src/compiler/arm/unwinding-info-writer-arm.h
+++ b/deps/v8/src/compiler/arm/unwinding-info-writer-arm.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_ARM_UNWINDING_INFO_WRITER_H_
-#define V8_COMPILER_ARM_UNWINDING_INFO_WRITER_H_
+#ifndef V8_COMPILER_ARM_UNWINDING_INFO_WRITER_ARM_H_
+#define V8_COMPILER_ARM_UNWINDING_INFO_WRITER_ARM_H_
#include "src/eh-frame.h"
@@ -69,4 +69,4 @@ class UnwindingInfoWriter {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_COMPILER_ARM_UNWINDING_INFO_WRITER_ARM_H_
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index 147d85a171..a07236b859 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -256,8 +256,7 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
offset = FrameOffset::FromStackPointer(from_sp);
}
}
- return MemOperand(offset.from_stack_pointer() ? tasm->StackPointer() : fp,
- offset.offset());
+ return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
};
@@ -297,8 +296,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
if (must_save_lr_) {
// We need to save and restore lr if the frame was elided.
__ Push(lr, padreg);
- unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset(),
- __ StackPointer());
+ unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset(), sp);
}
__ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
save_fp_mode);
@@ -374,6 +372,19 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
UNREACHABLE();
}
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
+ InstructionCode opcode, Instruction* instr,
+ Arm64OperandConverter& i) {
+ const MemoryAccessMode access_mode =
+ static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ if (access_mode == kMemoryAccessPoisoned) {
+ Register value = i.OutputRegister();
+ Register poison = value.Is64Bits() ? kSpeculationPoisonRegister
+ : kSpeculationPoisonRegister.W();
+ codegen->tasm()->And(value, value, Operand(poison));
+ }
+}
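EmitWordLoadPoisoningIfNeeded simply ANDs the just-loaded value with the poison register when the instruction carries kMemoryAccessPoisoned. A minimal C-level model of the effect, with plain integers standing in for registers:

    #include <cstdint>

    // On a correctly speculated path the mask is all-ones and the value flows
    // through; on a poisoned (mis-speculated) path it collapses to zero and
    // cannot feed a dependent, cache-observable access.
    uint32_t PoisonedLoad(const uint32_t* address, uint32_t poison_mask) {
      uint32_t value = *address;   // the architectural load
      return value & poison_mask;  // kMemoryAccessPoisoned: mask the result
    }

    int main() {
      uint32_t secret = 0x2a;
      return PoisonedLoad(&secret, 0x0u);  // suppressed path yields 0
    }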
+
} // namespace
#define ASSEMBLE_SHIFT(asm_instr, width) \
@@ -455,7 +466,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} while (0)
void CodeGenerator::AssembleDeconstructFrame() {
- __ Mov(csp, fp);
+ __ Mov(sp, fp);
__ Pop(fp, lr);
unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
@@ -535,29 +546,27 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
}
}
+// Check that {kJavaScriptCallCodeStartRegister} is correct.
+void CodeGenerator::AssembleCodeStartRegisterCheck() {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.AcquireX();
+ __ ComputeCodeStartAddress(scratch);
+ __ cmp(scratch, kJavaScriptCallCodeStartRegister);
+ __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
+}
+
// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
-// 1. compute the offset of the {CodeDataContainer} from our current location
-// and load it.
-// 2. read from memory the word that contains that bit, which can be found in
+// 1. read from memory the word that contains that bit, which can be found in
// the flags in the referenced {CodeDataContainer} object;
-// 3. test kMarkedForDeoptimizationBit in those flags; and
-// 4. if it is not zero then it jumps to the builtin.
+// 2. test kMarkedForDeoptimizationBit in those flags; and
+// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
UseScratchRegisterScope temps(tasm());
Register scratch = temps.AcquireX();
- {
- // Since we always emit a bailout check at the very beginning we can be
- // certain that the distance between here and the {CodeDataContainer} is
- // fixed and always in range of a load.
- int data_container_offset =
- (Code::kCodeDataContainerOffset - Code::kHeaderSize) - __ pc_offset();
- DCHECK_GE(0, data_container_offset);
- DCHECK_EQ(0, data_container_offset % 4);
- InstructionAccurateScope scope(tasm());
- __ ldr_pcrel(scratch, data_container_offset >> 2);
- }
+ int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+ __ Ldr(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
__ Ldr(scratch,
FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
Label not_deoptimized;
@@ -568,6 +577,29 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ Bind(&not_deoptimized);
}
+void CodeGenerator::GenerateSpeculationPoison() {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.AcquireX();
+
+ // Set a mask which has all bits set in the normal case, but has all
+ // bits cleared if we are speculatively executing the wrong PC.
+ __ ComputeCodeStartAddress(scratch);
+ __ Cmp(kJavaScriptCallCodeStartRegister, scratch);
+ __ Csetm(kSpeculationPoisonRegister, eq);
+ __ Csdb();
+}
+
+void CodeGenerator::AssembleRegisterArgumentPoisoning() {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.AcquireX();
+
+ __ Mov(scratch, sp);
+ __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
+ __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
+ __ And(scratch, scratch, kSpeculationPoisonRegister);
+ __ Mov(sp, scratch);
+}
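GenerateSpeculationPoison builds the mask by comparing the PC-derived code start against kJavaScriptCallCodeStartRegister and materializing all-ones on a match (Csetm), and AssembleRegisterArgumentPoisoning then ANDs the function, context, and stack pointer with it. A standalone sketch of those two steps, with plain integers standing in for the registers:

    #include <cstdint>
    #include <cstdio>

    // Models Csetm: all bits set when the two values are equal, cleared otherwise.
    uint64_t ComputePoison(uint64_t code_start_from_pc, uint64_t code_start_register) {
      return code_start_from_pc == code_start_register ? ~uint64_t{0} : uint64_t{0};
    }

    struct EntryState {
      uint64_t js_function;
      uint64_t context;
      uint64_t stack_pointer;
    };

    // Mask the registers a mis-speculated entry could otherwise leak through.
    void PoisonRegisterArguments(EntryState* state, uint64_t poison) {
      state->js_function &= poison;
      state->context &= poison;
      state->stack_pointer &= poison;
    }

    int main() {
      EntryState state{0x1000, 0x2000, 0x7fff0000};
      uint64_t poison = ComputePoison(0xdead0000, 0xbeef0000);  // mismatch
      PoisonRegisterArguments(&state, poison);
      std::printf("js_function after poisoning: %llx\n",
                  static_cast<unsigned long long>(state.js_function));
      return 0;
    }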
+
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -680,9 +712,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cmp(cp, temp);
__ Assert(eq, AbortReason::kWrongFunctionContext);
}
- __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeOffset));
- __ Add(x10, x10, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(x10);
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+ __ Ldr(x2, FieldMemOperand(func, JSFunction::kCodeOffset));
+ __ Add(x2, x2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(x2);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -799,7 +832,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssembleReturn(instr->InputAt(0));
break;
case kArchStackPointer:
- __ mov(i.OutputRegister(), tasm()->StackPointer());
+ __ mov(i.OutputRegister(), sp);
break;
case kArchFramePointer:
__ mov(i.OutputRegister(), fp);
@@ -844,7 +877,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchStackSlot: {
FrameOffset offset =
frame_access_state()->GetFrameOffset(i.InputInt32(0));
- Register base = offset.from_stack_pointer() ? __ StackPointer() : fp;
+ Register base = offset.from_stack_pointer() ? sp : fp;
__ Add(i.OutputRegister(0), base, Operand(offset.offset()));
break;
}
@@ -1161,6 +1194,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Sxth32:
__ Sxth(i.OutputRegister32(), i.InputRegister32(0));
break;
+ case kArm64Sxtb:
+ __ Sxtb(i.OutputRegister(), i.InputRegister32(0));
+ break;
+ case kArm64Sxth:
+ __ Sxth(i.OutputRegister(), i.InputRegister32(0));
+ break;
case kArm64Sxtw:
__ Sxtw(i.OutputRegister(), i.InputRegister32(0));
break;
@@ -1190,12 +1229,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArm64CompareAndBranch32:
case kArm64CompareAndBranch:
- // Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
+ // Pseudo instruction handled in AssembleArchBranch.
break;
case kArm64Claim: {
int count = i.InputInt32(0);
DCHECK_EQ(count % 2, 0);
- __ AssertCspAligned();
+ __ AssertSpAligned();
if (count > 0) {
__ Claim(count);
frame_access_state()->IncreaseSPDelta(count);
@@ -1493,33 +1532,40 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArm64Ldrb:
__ Ldrb(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Ldrsb:
__ Ldrsb(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Strb:
__ Strb(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
case kArm64Ldrh:
__ Ldrh(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Ldrsh:
__ Ldrsh(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Strh:
__ Strh(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
case kArm64Ldrsw:
__ Ldrsw(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrW:
__ Ldr(i.OutputRegister32(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64StrW:
__ Str(i.InputOrZeroRegister32(0), i.MemoryOperand(1));
break;
case kArm64Ldr:
__ Ldr(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Str:
__ Str(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
@@ -1542,82 +1588,86 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64StrQ:
__ Str(i.InputSimd128Register(0), i.MemoryOperand(1));
break;
- case kAtomicLoadInt8:
+ case kArm64DsbIsb:
+ __ Dsb(FullSystem, BarrierAll);
+ __ Isb();
+ break;
+ case kWord32AtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb);
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kAtomicLoadUint8:
+ case kWord32AtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb);
break;
- case kAtomicLoadInt16:
+ case kWord32AtomicLoadInt16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarh);
__ Sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kAtomicLoadUint16:
+ case kWord32AtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarh);
break;
- case kAtomicLoadWord32:
+ case kWord32AtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldar);
break;
- case kAtomicStoreWord8:
+ case kWord32AtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(Stlrb);
break;
- case kAtomicStoreWord16:
+ case kWord32AtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(Stlrh);
break;
- case kAtomicStoreWord32:
+ case kWord32AtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(Stlr);
break;
- case kAtomicExchangeInt8:
+ case kWord32AtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb);
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kAtomicExchangeUint8:
+ case kWord32AtomicExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb);
break;
- case kAtomicExchangeInt16:
+ case kWord32AtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh);
__ Sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kAtomicExchangeUint16:
+ case kWord32AtomicExchangeUint16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh);
break;
- case kAtomicExchangeWord32:
+ case kWord32AtomicExchangeWord32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxr, stlxr);
break;
- case kAtomicCompareExchangeInt8:
+ case kWord32AtomicCompareExchangeInt8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB);
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kAtomicCompareExchangeUint8:
+ case kWord32AtomicCompareExchangeUint8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB);
break;
- case kAtomicCompareExchangeInt16:
+ case kWord32AtomicCompareExchangeInt16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH);
__ Sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kAtomicCompareExchangeUint16:
+ case kWord32AtomicCompareExchangeUint16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH);
break;
- case kAtomicCompareExchangeWord32:
+ case kWord32AtomicCompareExchangeWord32:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxr, stlxr, UXTW);
break;
#define ATOMIC_BINOP_CASE(op, inst) \
- case kAtomic##op##Int8: \
+ case kWord32Atomic##op##Int8: \
ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst); \
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
- case kAtomic##op##Uint8: \
+ case kWord32Atomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst); \
break; \
- case kAtomic##op##Int16: \
+ case kWord32Atomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst); \
__ Sxth(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
- case kAtomic##op##Uint16: \
+ case kWord32Atomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst); \
break; \
- case kAtomic##op##Word32: \
+ case kWord32Atomic##op##Word32: \
ASSEMBLE_ATOMIC_BINOP(ldaxr, stlxr, inst); \
break;
ATOMIC_BINOP_CASE(Add, Add)
@@ -2097,6 +2147,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
ArchOpcode opcode = instr->arch_opcode();
if (opcode == kArm64CompareAndBranch32) {
+ DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Cbz(i.InputRegister32(0), tlabel);
@@ -2108,6 +2159,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
UNREACHABLE();
}
} else if (opcode == kArm64CompareAndBranch) {
+ DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Cbz(i.InputRegister64(0), tlabel);
@@ -2119,6 +2171,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
UNREACHABLE();
}
} else if (opcode == kArm64TestAndBranch32) {
+ DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Tbz(i.InputRegister32(0), i.InputInt5(1), tlabel);
@@ -2130,6 +2183,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
UNREACHABLE();
}
} else if (opcode == kArm64TestAndBranch) {
+ DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Tbz(i.InputRegister64(0), i.InputInt6(1), tlabel);
@@ -2147,6 +2201,19 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ B(flabel); // no fallthru to flabel.
}
+void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
+ Instruction* instr) {
+ // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
+ if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
+ return;
+ }
+
+ condition = NegateFlagsCondition(condition);
+ __ CmovX(kSpeculationPoisonRegister, xzr,
+ FlagsConditionToCondition(condition));
+ __ Csdb();
+}
+
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -2189,13 +2256,13 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ isolate()),
0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
- CallDescriptor* descriptor = gen_->linkage()->GetIncomingDescriptor();
- int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
+ int pop_count =
+ static_cast<int>(call_descriptor->StackParameterCount());
pop_count += (pop_count & 1); // align
__ Drop(pop_count);
__ Ret();
} else {
- DCHECK(csp.Is(__ StackPointer()));
gen_->AssembleSourcePosition(instr_);
__ Call(__ isolate()->builtins()->builtin_handle(trap_id),
RelocInfo::CODE_TARGET);
@@ -2267,11 +2334,11 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
void CodeGenerator::FinishFrame(Frame* frame) {
frame->AlignFrame(16);
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
// Save FP registers.
CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
- descriptor->CalleeSavedFPRegisters());
+ call_descriptor->CalleeSavedFPRegisters());
int saved_count = saves_fp.Count();
if (saved_count != 0) {
DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedV().list());
@@ -2281,7 +2348,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
}
CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
- descriptor->CalleeSavedRegisters());
+ call_descriptor->CalleeSavedRegisters());
saved_count = saves.Count();
if (saved_count != 0) {
DCHECK_EQ(saved_count % 2, 0);
@@ -2290,29 +2357,29 @@ void CodeGenerator::FinishFrame(Frame* frame) {
}
void CodeGenerator::AssembleConstructFrame() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- __ AssertCspAligned();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+ __ AssertSpAligned();
// The frame has been previously padded in CodeGenerator::FinishFrame().
DCHECK_EQ(frame()->GetTotalFrameSlotCount() % 2, 0);
- int shrink_slots =
- frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
+ int shrink_slots = frame()->GetTotalFrameSlotCount() -
+ call_descriptor->CalculateFixedFrameSize();
CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
- descriptor->CalleeSavedRegisters());
+ call_descriptor->CalleeSavedRegisters());
CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
- descriptor->CalleeSavedFPRegisters());
+ call_descriptor->CalleeSavedFPRegisters());
// The number of slots for returns has to be even to ensure the correct stack
// alignment.
const int returns = RoundUp(frame()->GetReturnSlotCount(), 2);
if (frame_access_state()->has_frame()) {
// Link the frame
- if (descriptor->IsJSFunctionCall()) {
+ if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
} else {
__ Push(lr, fp);
- __ Mov(fp, __ StackPointer());
+ __ Mov(fp, sp);
}
unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
@@ -2328,6 +2395,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
+ InitializePoisonForLoadsIfNeeded();
}
if (info()->IsWasm() && shrink_slots > 128) {
@@ -2346,7 +2414,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ isolate())));
__ Ldr(scratch, MemOperand(scratch));
__ Add(scratch, scratch, shrink_slots * kPointerSize);
- __ Cmp(__ StackPointer(), scratch);
+ __ Cmp(sp, scratch);
__ B(hs, &done);
}
@@ -2356,8 +2424,6 @@ void CodeGenerator::AssembleConstructFrame() {
// runtime call.
__ EnterFrame(StackFrame::WASM_COMPILED);
}
- DCHECK(__ StackPointer().Is(csp));
- __ AssertStackConsistency();
__ Mov(cp, Smi::kZero);
__ CallRuntimeDelayed(zone(), Runtime::kThrowWasmStackOverflow);
// We come from WebAssembly, there are no references for the GC.
@@ -2367,7 +2433,6 @@ void CodeGenerator::AssembleConstructFrame() {
if (FLAG_debug_code) {
__ Brk(0);
}
- __ AssertStackConsistency();
__ Bind(&done);
}
@@ -2380,9 +2445,9 @@ void CodeGenerator::AssembleConstructFrame() {
// frame-specific header information, i.e. claiming the extra slot that
// other platforms explicitly push for STUB (code object) frames and frames
// recording their argument count.
- switch (descriptor->kind()) {
+ switch (call_descriptor->kind()) {
case CallDescriptor::kCallJSFunction:
- if (descriptor->PushArgumentCount()) {
+ if (call_descriptor->PushArgumentCount()) {
__ Claim(shrink_slots + 1); // Claim extra slot for argc.
__ Str(kJavaScriptCallArgCountRegister,
MemOperand(fp, OptimizedBuiltinFrameConstants::kArgCOffset));
@@ -2424,7 +2489,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
const int returns = RoundUp(frame()->GetReturnSlotCount(), 2);
@@ -2434,19 +2499,19 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
// Restore registers.
CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
- descriptor->CalleeSavedRegisters());
+ call_descriptor->CalleeSavedRegisters());
__ PopCPURegList(saves);
// Restore fp registers.
CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
- descriptor->CalleeSavedFPRegisters());
+ call_descriptor->CalleeSavedFPRegisters());
__ PopCPURegList(saves_fp);
unwinding_info_writer_.MarkBlockWillExit();
Arm64OperandConverter g(this, nullptr);
- int pop_count = static_cast<int>(descriptor->StackParameterCount());
- if (descriptor->IsCFunctionCall()) {
+ int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
+ if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now unless they have a variable
@@ -2473,7 +2538,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
__ DropArguments(pop_reg);
}
- __ AssertCspAligned();
+ __ AssertSpAligned();
__ Ret();
}
@@ -2482,195 +2547,195 @@ void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
Arm64OperandConverter g(this, nullptr);
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister()) {
- DCHECK(destination->IsRegister() || destination->IsStackSlot());
- Register src = g.ToRegister(source);
- if (destination->IsRegister()) {
- __ Mov(g.ToRegister(destination), src);
+ // Helper function to write the given constant to the dst register.
+ auto MoveConstantToRegister = [&](Register dst, Constant src) {
+ if (src.type() == Constant::kHeapObject) {
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ Heap::RootListIndex index;
+ if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
+ } else {
+ __ Mov(dst, src_object);
+ }
} else {
- __ Str(src, g.ToMemOperand(destination, tasm()));
+ __ Mov(dst, g.ToImmediate(source));
}
- } else if (source->IsStackSlot()) {
- MemOperand src = g.ToMemOperand(source, tasm());
- DCHECK(destination->IsRegister() || destination->IsStackSlot());
- if (destination->IsRegister()) {
- __ Ldr(g.ToRegister(destination), src);
- } else {
- UseScratchRegisterScope scope(tasm());
- Register temp = scope.AcquireX();
- __ Ldr(temp, src);
- __ Str(temp, g.ToMemOperand(destination, tasm()));
- }
- } else if (source->IsConstant()) {
- Constant src = g.ToConstant(ConstantOperand::cast(source));
- if (destination->IsRegister() || destination->IsStackSlot()) {
- UseScratchRegisterScope scope(tasm());
- Register dst = destination->IsRegister() ? g.ToRegister(destination)
- : scope.AcquireX();
- if (src.type() == Constant::kHeapObject) {
- Handle<HeapObject> src_object = src.ToHeapObject();
- Heap::RootListIndex index;
- if (IsMaterializableFromRoot(src_object, &index)) {
- __ LoadRoot(dst, index);
- } else {
- __ Mov(dst, src_object);
- }
+ };
+ switch (MoveType::InferMove(source, destination)) {
+ case MoveType::kRegisterToRegister:
+ if (source->IsRegister()) {
+ __ Mov(g.ToRegister(destination), g.ToRegister(source));
+ } else if (source->IsFloatRegister() || source->IsDoubleRegister()) {
+ __ Mov(g.ToDoubleRegister(destination), g.ToDoubleRegister(source));
} else {
- __ Mov(dst, g.ToImmediate(source));
- }
- if (destination->IsStackSlot()) {
- __ Str(dst, g.ToMemOperand(destination, tasm()));
+ DCHECK(source->IsSimd128Register());
+ __ Mov(g.ToDoubleRegister(destination).Q(),
+ g.ToDoubleRegister(source).Q());
}
- } else if (src.type() == Constant::kFloat32) {
- if (destination->IsFPRegister()) {
- VRegister dst = g.ToDoubleRegister(destination).S();
- __ Fmov(dst, src.ToFloat32());
+ return;
+ case MoveType::kRegisterToStack: {
+ MemOperand dst = g.ToMemOperand(destination, tasm());
+ if (source->IsRegister()) {
+ __ Str(g.ToRegister(source), dst);
} else {
- DCHECK(destination->IsFPStackSlot());
- if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
- __ Str(wzr, g.ToMemOperand(destination, tasm()));
+ VRegister src = g.ToDoubleRegister(source);
+ if (source->IsFloatRegister() || source->IsDoubleRegister()) {
+ __ Str(src, dst);
} else {
- UseScratchRegisterScope scope(tasm());
- VRegister temp = scope.AcquireS();
- __ Fmov(temp, src.ToFloat32());
- __ Str(temp, g.ToMemOperand(destination, tasm()));
+ DCHECK(source->IsSimd128Register());
+ __ Str(src.Q(), dst);
}
}
- } else {
- DCHECK_EQ(Constant::kFloat64, src.type());
- if (destination->IsFPRegister()) {
- VRegister dst = g.ToDoubleRegister(destination);
- __ Fmov(dst, src.ToFloat64().value());
+ return;
+ }
+ case MoveType::kStackToRegister: {
+ MemOperand src = g.ToMemOperand(source, tasm());
+ if (destination->IsRegister()) {
+ __ Ldr(g.ToRegister(destination), src);
} else {
- DCHECK(destination->IsFPStackSlot());
- if (src.ToFloat64().AsUint64() == 0) {
- __ Str(xzr, g.ToMemOperand(destination, tasm()));
+ VRegister dst = g.ToDoubleRegister(destination);
+ if (destination->IsFloatRegister() || destination->IsDoubleRegister()) {
+ __ Ldr(dst, src);
} else {
- UseScratchRegisterScope scope(tasm());
- VRegister temp = scope.AcquireD();
- __ Fmov(temp, src.ToFloat64().value());
- __ Str(temp, g.ToMemOperand(destination, tasm()));
+ DCHECK(destination->IsSimd128Register());
+ __ Ldr(dst.Q(), src);
}
}
+ return;
}
- } else if (source->IsFPRegister()) {
- VRegister src = g.ToDoubleRegister(source);
- if (destination->IsFPRegister()) {
- VRegister dst = g.ToDoubleRegister(destination);
- if (destination->IsSimd128Register()) {
- __ Mov(dst.Q(), src.Q());
- } else {
- __ Mov(dst, src);
- }
- } else {
- DCHECK(destination->IsFPStackSlot());
+ case MoveType::kStackToStack: {
+ MemOperand src = g.ToMemOperand(source, tasm());
MemOperand dst = g.ToMemOperand(destination, tasm());
- if (destination->IsSimd128StackSlot()) {
- __ Str(src.Q(), dst);
+ if (source->IsSimd128StackSlot()) {
+ UseScratchRegisterScope scope(tasm());
+ VRegister temp = scope.AcquireQ();
+ __ Ldr(temp, src);
+ __ Str(temp, dst);
} else {
- __ Str(src, dst);
+ UseScratchRegisterScope scope(tasm());
+ Register temp = scope.AcquireX();
+ __ Ldr(temp, src);
+ __ Str(temp, dst);
}
+ return;
}
- } else if (source->IsFPStackSlot()) {
- DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
- MemOperand src = g.ToMemOperand(source, tasm());
- if (destination->IsFPRegister()) {
- VRegister dst = g.ToDoubleRegister(destination);
- if (destination->IsSimd128Register()) {
- __ Ldr(dst.Q(), src);
+ case MoveType::kConstantToRegister: {
+ Constant src = g.ToConstant(source);
+ if (destination->IsRegister()) {
+ MoveConstantToRegister(g.ToRegister(destination), src);
} else {
- __ Ldr(dst, src);
+ VRegister dst = g.ToDoubleRegister(destination);
+ if (destination->IsFloatRegister()) {
+ __ Fmov(dst.S(), src.ToFloat32());
+ } else {
+ DCHECK(destination->IsDoubleRegister());
+ __ Fmov(dst, src.ToFloat64().value());
+ }
}
- } else {
- UseScratchRegisterScope scope(tasm());
- VRegister temp = scope.AcquireD();
+ return;
+ }
+ case MoveType::kConstantToStack: {
+ Constant src = g.ToConstant(source);
MemOperand dst = g.ToMemOperand(destination, tasm());
- if (destination->IsSimd128StackSlot()) {
- __ Ldr(temp.Q(), src);
- __ Str(temp.Q(), dst);
- } else {
- __ Ldr(temp, src);
+ if (destination->IsStackSlot()) {
+ UseScratchRegisterScope scope(tasm());
+ Register temp = scope.AcquireX();
+ MoveConstantToRegister(temp, src);
__ Str(temp, dst);
+ } else if (destination->IsFloatStackSlot()) {
+ if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
+ __ Str(wzr, dst);
+ } else {
+ UseScratchRegisterScope scope(tasm());
+ VRegister temp = scope.AcquireS();
+ __ Fmov(temp, src.ToFloat32());
+ __ Str(temp, dst);
+ }
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ if (src.ToFloat64().AsUint64() == 0) {
+ __ Str(xzr, dst);
+ } else {
+ UseScratchRegisterScope scope(tasm());
+ VRegister temp = scope.AcquireD();
+ __ Fmov(temp, src.ToFloat64().value());
+ __ Str(temp, dst);
+ }
}
+ return;
}
- } else {
- UNREACHABLE();
}
+ UNREACHABLE();
}
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
Arm64OperandConverter g(this, nullptr);
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister()) {
- // Register-register.
- UseScratchRegisterScope scope(tasm());
- Register temp = scope.AcquireX();
- Register src = g.ToRegister(source);
- if (destination->IsRegister()) {
- Register dst = g.ToRegister(destination);
- __ Mov(temp, src);
- __ Mov(src, dst);
- __ Mov(dst, temp);
- } else {
- DCHECK(destination->IsStackSlot());
- MemOperand dst = g.ToMemOperand(destination, tasm());
- __ Mov(temp, src);
- __ Ldr(src, dst);
- __ Str(temp, dst);
- }
- } else if (source->IsStackSlot() || source->IsFPStackSlot()) {
- UseScratchRegisterScope scope(tasm());
- VRegister temp_0 = scope.AcquireD();
- VRegister temp_1 = scope.AcquireD();
- MemOperand src = g.ToMemOperand(source, tasm());
- MemOperand dst = g.ToMemOperand(destination, tasm());
- if (source->IsSimd128StackSlot()) {
- __ Ldr(temp_0.Q(), src);
- __ Ldr(temp_1.Q(), dst);
- __ Str(temp_0.Q(), dst);
- __ Str(temp_1.Q(), src);
- } else {
- __ Ldr(temp_0, src);
- __ Ldr(temp_1, dst);
- __ Str(temp_0, dst);
- __ Str(temp_1, src);
- }
- } else if (source->IsFPRegister()) {
- UseScratchRegisterScope scope(tasm());
- VRegister temp = scope.AcquireD();
- VRegister src = g.ToDoubleRegister(source);
- if (destination->IsFPRegister()) {
- VRegister dst = g.ToDoubleRegister(destination);
- if (source->IsSimd128Register()) {
- __ Mov(temp.Q(), src.Q());
- __ Mov(src.Q(), dst.Q());
- __ Mov(dst.Q(), temp.Q());
+ switch (MoveType::InferSwap(source, destination)) {
+ case MoveType::kRegisterToRegister:
+ if (source->IsRegister()) {
+ __ Swap(g.ToRegister(source), g.ToRegister(destination));
} else {
- __ Mov(temp, src);
- __ Mov(src, dst);
- __ Mov(dst, temp);
+ VRegister src = g.ToDoubleRegister(source);
+ VRegister dst = g.ToDoubleRegister(destination);
+ if (source->IsFloatRegister() || source->IsDoubleRegister()) {
+ __ Swap(src, dst);
+ } else {
+ DCHECK(source->IsSimd128Register());
+ __ Swap(src.Q(), dst.Q());
+ }
}
- } else {
- DCHECK(destination->IsFPStackSlot());
+ return;
+ case MoveType::kRegisterToStack: {
+ UseScratchRegisterScope scope(tasm());
MemOperand dst = g.ToMemOperand(destination, tasm());
- if (source->IsSimd128Register()) {
- __ Mov(temp.Q(), src.Q());
- __ Ldr(src.Q(), dst);
- __ Str(temp.Q(), dst);
- } else {
+ if (source->IsRegister()) {
+ Register temp = scope.AcquireX();
+ Register src = g.ToRegister(source);
__ Mov(temp, src);
__ Ldr(src, dst);
__ Str(temp, dst);
+ } else {
+ UseScratchRegisterScope scope(tasm());
+ VRegister src = g.ToDoubleRegister(source);
+ if (source->IsFloatRegister() || source->IsDoubleRegister()) {
+ VRegister temp = scope.AcquireD();
+ __ Mov(temp, src);
+ __ Ldr(src, dst);
+ __ Str(temp, dst);
+ } else {
+ DCHECK(source->IsSimd128Register());
+ VRegister temp = scope.AcquireQ();
+ __ Mov(temp, src.Q());
+ __ Ldr(src.Q(), dst);
+ __ Str(temp, dst);
+ }
}
+ return;
}
- } else {
- // No other combinations are possible.
- UNREACHABLE();
+ case MoveType::kStackToStack: {
+ UseScratchRegisterScope scope(tasm());
+ MemOperand src = g.ToMemOperand(source, tasm());
+ MemOperand dst = g.ToMemOperand(destination, tasm());
+ VRegister temp_0 = scope.AcquireD();
+ VRegister temp_1 = scope.AcquireD();
+ if (source->IsSimd128StackSlot()) {
+ __ Ldr(temp_0.Q(), src);
+ __ Ldr(temp_1.Q(), dst);
+ __ Str(temp_0.Q(), dst);
+ __ Str(temp_1.Q(), src);
+ } else {
+ __ Ldr(temp_0, src);
+ __ Ldr(temp_1, dst);
+ __ Str(temp_0, dst);
+ __ Str(temp_1, src);
+ }
+ return;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
}
@@ -2680,7 +2745,6 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
UNREACHABLE();
}
-
#undef __
} // namespace compiler
diff --git a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
index 820b55a99d..72218ce8fd 100644
--- a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
@@ -67,6 +67,8 @@ namespace compiler {
V(Arm64Mov32) \
V(Arm64Sxtb32) \
V(Arm64Sxth32) \
+ V(Arm64Sxtb) \
+ V(Arm64Sxth) \
V(Arm64Sxtw) \
V(Arm64Sbfx32) \
V(Arm64Ubfx) \
@@ -155,6 +157,7 @@ namespace compiler {
V(Arm64StrW) \
V(Arm64Ldr) \
V(Arm64Str) \
+ V(Arm64DsbIsb) \
V(Arm64F32x4Splat) \
V(Arm64F32x4ExtractLane) \
V(Arm64F32x4ReplaceLane) \
diff --git a/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
index c2b0a4e386..5378cb2f9c 100644
--- a/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
@@ -67,7 +67,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Ror:
case kArm64Ror32:
case kArm64Mov32:
+ case kArm64Sxtb:
case kArm64Sxtb32:
+ case kArm64Sxth:
case kArm64Sxth32:
case kArm64Sxtw:
case kArm64Sbfx32:
@@ -306,6 +308,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Strh:
case kArm64StrW:
case kArm64Str:
+ case kArm64DsbIsb:
return kHasSideEffect;
#define CASE(Name) case k##Name:
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index d6082c9f0a..0787ccdc0f 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -629,9 +629,16 @@ void InstructionSelector::VisitLoad(Node* node) {
UNREACHABLE();
return;
}
+ if (node->opcode() == IrOpcode::kPoisonedLoad) {
+ CHECK_EQ(load_poisoning_, LoadPoisoning::kDoPoison);
+ opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ }
+
EmitLoad(this, node, opcode, immediate_mode, rep);
}
+void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -1238,6 +1245,7 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
}
RR_OP_LIST(RR_VISITOR)
#undef RR_VISITOR
+#undef RR_OP_LIST
#define RRR_VISITOR(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -1245,6 +1253,7 @@ RR_OP_LIST(RR_VISITOR)
}
RRR_OP_LIST(RRR_VISITOR)
#undef RRR_VISITOR
+#undef RRR_OP_LIST
void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
@@ -1258,7 +1267,10 @@ void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitSpeculationFence(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64DsbIsb, g.NoOutput());
+}
void InstructionSelector::VisitInt32Add(Node* node) {
Arm64OperandGenerator g(this);
@@ -1642,6 +1654,7 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
default:
break;
}
+ break;
}
default:
break;
@@ -1680,7 +1693,7 @@ void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
}
void InstructionSelector::EmitPrepareArguments(
- ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
Node* node) {
Arm64OperandGenerator g(this);
@@ -1692,7 +1705,7 @@ void InstructionSelector::EmitPrepareArguments(
// Bump the stack pointer(s).
if (claim_count > 0) {
// TODO(titzer): claim and poke probably take small immediates.
- // TODO(titzer): it would be better to bump the csp here only
+ // TODO(titzer): it would be better to bump the sp here only
// and emit paired stores with increment for non c frames.
Emit(kArm64Claim, g.NoOutput(), g.TempImmediate(claim_count));
}
@@ -1720,9 +1733,9 @@ void InstructionSelector::EmitPrepareArguments(
}
}
-void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
- const CallDescriptor* descriptor,
- Node* node) {
+void InstructionSelector::EmitPrepareResults(
+ ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
+ Node* node) {
Arm64OperandGenerator g(this);
int reverse_slot = 0;
@@ -1731,7 +1744,7 @@ void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
reverse_slot += output.location.GetSizeInPointers();
// Skip any alignment holes in nodes.
if (output.node == nullptr) continue;
- DCHECK(!descriptor->IsCFunctionCall());
+ DCHECK(!call_descriptor->IsCFunctionCall());
if (output.location.GetType() == MachineType::Float32()) {
MarkAsFloat32(output.node);
@@ -1939,6 +1952,9 @@ void EmitBranchOrDeoptimize(InstructionSelector* selector,
// against {value}, depending on the condition.
bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value,
Node* user, FlagsCondition cond, FlagsContinuation* cont) {
+ // Branch poisoning requires flags to be set, so when it's enabled for
+ // a particular branch, we shouldn't be applying the cbz/tbz optimization.
+ DCHECK(!cont->IsPoisoned());
// Only handle branches and deoptimisations.
if (!cont->IsBranch() && !cont->IsDeoptimize()) return false;
@@ -1991,7 +2007,8 @@ bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value,
return true;
}
}
- } // Fall through.
+ V8_FALLTHROUGH;
+ }
case kUnsignedLessThanOrEqual:
case kUnsignedGreaterThan: {
if (value != 0) return false;
@@ -2010,16 +2027,18 @@ void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Int32BinopMatcher m(node);
FlagsCondition cond = cont->condition();
- if (m.right().HasValue()) {
- if (TryEmitCbzOrTbz(selector, m.left().node(), m.right().Value(), node,
- cond, cont)) {
- return;
- }
- } else if (m.left().HasValue()) {
- FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
- if (TryEmitCbzOrTbz(selector, m.right().node(), m.left().Value(), node,
- commuted_cond, cont)) {
- return;
+ if (!cont->IsPoisoned()) {
+ if (m.right().HasValue()) {
+ if (TryEmitCbzOrTbz(selector, m.left().node(), m.right().Value(), node,
+ cond, cont)) {
+ return;
+ }
+ } else if (m.left().HasValue()) {
+ FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
+ if (TryEmitCbzOrTbz(selector, m.right().node(), m.left().Value(), node,
+ commuted_cond, cont)) {
+ return;
+ }
}
}
ArchOpcode opcode = kArm64Cmp32;
@@ -2092,7 +2111,7 @@ bool TryEmitTestAndBranch(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Arm64OperandGenerator g(selector);
Matcher m(node);
- if (cont->IsBranch() && m.right().HasValue() &&
+ if (cont->IsBranch() && !cont->IsPoisoned() && m.right().HasValue() &&
base::bits::IsPowerOfTwo(m.right().Value())) {
// If the mask has only one bit set, we can use tbz/tbnz.
DCHECK((cont->condition() == kEqual) || (cont->condition() == kNotEqual));
@@ -2142,12 +2161,13 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
}
}
-void VisitWordCompareZero(InstructionSelector* selector, Node* user,
- Node* value, FlagsContinuation* cont) {
- Arm64OperandGenerator g(selector);
+} // namespace
+
+void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
+ FlagsContinuation* cont) {
+ Arm64OperandGenerator g(this);
// Try to combine with comparisons against 0 by simply inverting the branch.
- while (value->opcode() == IrOpcode::kWord32Equal &&
- selector->CanCover(user, value)) {
+ while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
Int32BinopMatcher m(value);
if (!m.right().Is(0)) break;
@@ -2156,84 +2176,83 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
cont->Negate();
}
- if (selector->CanCover(user, value)) {
+ if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kWord32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kInt32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kUint32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kUint32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kWord64Equal: {
cont->OverwriteAndNegateIfEqual(kEqual);
Int64BinopMatcher m(value);
if (m.right().Is(0)) {
Node* const left = m.left().node();
- if (selector->CanCover(value, left) &&
- left->opcode() == IrOpcode::kWord64And) {
+ if (CanCover(value, left) && left->opcode() == IrOpcode::kWord64And) {
// Attempt to merge the Word64Equal(Word64And(x, y), 0) comparison
// into a tbz/tbnz instruction.
if (TryEmitTestAndBranch<Uint64BinopMatcher, kArm64TestAndBranch>(
- selector, left, cont)) {
+ this, left, cont)) {
return;
}
- return VisitWordCompare(selector, left, kArm64Tst, cont, true,
+ return VisitWordCompare(this, left, kArm64Tst, cont, true,
kLogical64Imm);
}
// Merge the Word64Equal(x, 0) comparison into a cbz instruction.
- if (cont->IsBranch() || cont->IsDeoptimize()) {
- EmitBranchOrDeoptimize(selector,
- cont->Encode(kArm64CompareAndBranch),
+ if ((cont->IsBranch() || cont->IsDeoptimize()) &&
+ !cont->IsPoisoned()) {
+ EmitBranchOrDeoptimize(this, cont->Encode(kArm64CompareAndBranch),
g.UseRegister(left), cont);
return;
}
}
- return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
+ return VisitWordCompare(this, value, kArm64Cmp, cont, false,
kArithmeticImm);
}
case IrOpcode::kInt64LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
+ return VisitWordCompare(this, value, kArm64Cmp, cont, false,
kArithmeticImm);
case IrOpcode::kInt64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
+ return VisitWordCompare(this, value, kArm64Cmp, cont, false,
kArithmeticImm);
case IrOpcode::kUint64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
+ return VisitWordCompare(this, value, kArm64Cmp, cont, false,
kArithmeticImm);
case IrOpcode::kUint64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
+ return VisitWordCompare(this, value, kArm64Cmp, cont, false,
kArithmeticImm);
case IrOpcode::kFloat32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThan:
cont->OverwriteAndNegateIfEqual(kFloatLessThan);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat64Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThan:
cont->OverwriteAndNegateIfEqual(kFloatLessThan);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
@@ -2245,30 +2264,30 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (result == nullptr || selector->IsDefined(result)) {
+ if (result == nullptr || IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int32BinopMatcher>(
- selector, node, kArm64Add32, kArithmeticImm, cont);
+ return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
+ kArithmeticImm, cont);
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int32BinopMatcher>(
- selector, node, kArm64Sub32, kArithmeticImm, cont);
+ return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
+ kArithmeticImm, cont);
case IrOpcode::kInt32MulWithOverflow:
// ARM64 doesn't set the overflow flag for multiplication, so we
// need to test on kNotEqual. Here is the code sequence used:
// smull result, left, right
// cmp result.X(), Operand(result, SXTW)
cont->OverwriteAndNegateIfEqual(kNotEqual);
- return EmitInt32MulWithOverflow(selector, node, cont);
+ return EmitInt32MulWithOverflow(this, node, cont);
case IrOpcode::kInt64AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int64BinopMatcher>(selector, node, kArm64Add,
+ return VisitBinop<Int64BinopMatcher>(this, node, kArm64Add,
kArithmeticImm, cont);
case IrOpcode::kInt64SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int64BinopMatcher>(selector, node, kArm64Sub,
+ return VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub,
kArithmeticImm, cont);
default:
break;
@@ -2277,23 +2296,23 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
}
break;
case IrOpcode::kInt32Add:
- return VisitWordCompare(selector, value, kArm64Cmn32, cont, true,
+ return VisitWordCompare(this, value, kArm64Cmn32, cont, true,
kArithmeticImm);
case IrOpcode::kInt32Sub:
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kWord32And:
if (TryEmitTestAndBranch<Uint32BinopMatcher, kArm64TestAndBranch32>(
- selector, value, cont)) {
+ this, value, cont)) {
return;
}
- return VisitWordCompare(selector, value, kArm64Tst32, cont, true,
+ return VisitWordCompare(this, value, kArm64Tst32, cont, true,
kLogical32Imm);
case IrOpcode::kWord64And:
if (TryEmitTestAndBranch<Uint64BinopMatcher, kArm64TestAndBranch>(
- selector, value, cont)) {
+ this, value, cont)) {
return;
}
- return VisitWordCompare(selector, value, kArm64Tst, cont, true,
+ return VisitWordCompare(this, value, kArm64Tst, cont, true,
kLogical64Imm);
default:
break;
@@ -2302,80 +2321,52 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// Branch could not be combined with a compare, compare against 0 and branch.
if (cont->IsBranch()) {
- selector->Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
- g.UseRegister(value), g.Label(cont->true_block()),
- g.Label(cont->false_block()));
+ if (cont->IsPoisoned()) {
+ // We need an instruction that sets flags for poisoning to work.
+ Emit(cont->Encode(kArm64Tst32), g.NoOutput(), g.UseRegister(value),
+ g.UseRegister(value), g.Label(cont->true_block()),
+ g.Label(cont->false_block()));
+ } else {
+ Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
+ g.UseRegister(value), g.Label(cont->true_block()),
+ g.Label(cont->false_block()));
+ }
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(cont->Encode(kArm64Tst32), g.NoOutput(),
- g.UseRegister(value), g.UseRegister(value),
- cont->kind(), cont->reason(), cont->feedback(),
- cont->frame_state());
+ EmitDeoptimize(cont->Encode(kArm64Tst32), g.NoOutput(),
+ g.UseRegister(value), g.UseRegister(value), cont->kind(),
+ cont->reason(), cont->feedback(), cont->frame_state());
} else {
DCHECK(cont->IsTrap());
- selector->Emit(cont->Encode(kArm64Tst32), g.NoOutput(),
- g.UseRegister(value), g.UseRegister(value),
- g.UseImmediate(cont->trap_id()));
+ Emit(cont->Encode(kArm64Tst32), g.NoOutput(), g.UseRegister(value),
+ g.UseRegister(value), g.UseImmediate(cont->trap_id()));
}
}
-} // namespace
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
- BasicBlock* fbranch) {
- FlagsContinuation cont(kNotEqual, tbranch, fbranch);
- VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapUnless(Node* node,
- Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
Arm64OperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
- static const size_t kMaxTableSwitchValueRange = 2 << 16;
- size_t table_space_cost = 4 + sw.value_range;
- size_t table_time_cost = 3;
- size_t lookup_space_cost = 3 + 2 * sw.case_count;
- size_t lookup_time_cost = sw.case_count;
- if (sw.case_count > 0 &&
- table_space_cost + 3 * table_time_cost <=
- lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min() &&
- sw.value_range <= kMaxTableSwitchValueRange) {
- InstructionOperand index_operand = value_operand;
- if (sw.min_value) {
- index_operand = g.TempRegister();
- Emit(kArm64Sub32, index_operand, value_operand,
- g.TempImmediate(sw.min_value));
+ if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
+ size_t table_space_cost = 4 + sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 3 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value) {
+ index_operand = g.TempRegister();
+ Emit(kArm64Sub32, index_operand, value_operand,
+ g.TempImmediate(sw.min_value));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
}
- // Generate a table lookup.
- return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
@@ -2622,7 +2613,7 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(left), g.UseRegister(right));
}
-void InstructionSelector::VisitAtomicLoad(Node* node) {
+void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
Arm64OperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -2630,13 +2621,15 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
ArchOpcode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ opcode =
+ load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
+ : kWord32AtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicLoadWord32;
+ opcode = kWord32AtomicLoadWord32;
break;
default:
UNREACHABLE();
@@ -2650,7 +2643,7 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
arraysize(temps), temps);
}
-void InstructionSelector::VisitAtomicStore(Node* node) {
+void InstructionSelector::VisitWord32AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
Arm64OperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -2659,13 +2652,13 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kAtomicStoreWord8;
+ opcode = kWord32AtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
- opcode = kAtomicStoreWord16;
+ opcode = kWord32AtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicStoreWord32;
+ opcode = kWord32AtomicStoreWord32;
break;
default:
UNREACHABLE();
@@ -2683,7 +2676,7 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
Emit(code, 0, nullptr, input_count, inputs, arraysize(temps), temps);
}
-void InstructionSelector::VisitAtomicExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
Arm64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2691,15 +2684,15 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpRepresentationOf(node->op());
if (type == MachineType::Int8()) {
- opcode = kAtomicExchangeInt8;
+ opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicExchangeUint8;
+ opcode = kWord32AtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicExchangeInt16;
+ opcode = kWord32AtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicExchangeUint16;
+ opcode = kWord32AtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicExchangeWord32;
+ opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -2718,7 +2711,7 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
}
-void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
Arm64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2727,15 +2720,15 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpRepresentationOf(node->op());
if (type == MachineType::Int8()) {
- opcode = kAtomicCompareExchangeInt8;
+ opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicCompareExchangeUint8;
+ opcode = kWord32AtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicCompareExchangeInt16;
+ opcode = kWord32AtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicCompareExchangeUint16;
+ opcode = kWord32AtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicCompareExchangeWord32;
+ opcode = kWord32AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -2793,11 +2786,12 @@ void InstructionSelector::VisitAtomicBinaryOperation(
Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitAtomic##op(Node* node) { \
- VisitAtomicBinaryOperation(node, kAtomic##op##Int8, kAtomic##op##Uint8, \
- kAtomic##op##Int16, kAtomic##op##Uint16, \
- kAtomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitAtomicBinaryOperation( \
+ node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
+ kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
+ kWord32Atomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2820,11 +2814,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8) \
V(I8x16)
-#define SIMD_FORMAT_LIST(V) \
- V(32x4, 4) \
- V(16x8, 8) \
- V(8x16, 16)
-
#define SIMD_UNOP_LIST(V) \
V(F32x4SConvertI32x4, kArm64F32x4SConvertI32x4) \
V(F32x4UConvertI32x4, kArm64F32x4UConvertI32x4) \
@@ -2957,6 +2946,7 @@ SIMD_TYPE_LIST(SIMD_VISIT_EXTRACT_LANE)
}
SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
#undef SIMD_VISIT_REPLACE_LANE
+#undef SIMD_TYPE_LIST
#define SIMD_VISIT_UNOP(Name, instruction) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -2964,6 +2954,7 @@ SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
}
SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
#undef SIMD_VISIT_UNOP
+#undef SIMD_UNOP_LIST
#define SIMD_VISIT_SHIFT_OP(Name) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -2971,6 +2962,7 @@ SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
}
SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
#undef SIMD_VISIT_SHIFT_OP
+#undef SIMD_SHIFT_OP_LIST
#define SIMD_VISIT_BINOP(Name, instruction) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -2978,6 +2970,7 @@ SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
}
SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
+#undef SIMD_BINOP_LIST
void InstructionSelector::VisitS128Select(Node* node) {
Arm64OperandGenerator g(this);
@@ -3132,6 +3125,26 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
g.UseImmediate(Pack4Lanes(shuffle + 12, mask)));
}
+void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
+ VisitRR(this, kArm64Sxtb32, node);
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
+ VisitRR(this, kArm64Sxth32, node);
+}
+
+void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
+ VisitRR(this, kArm64Sxtb, node);
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
+ VisitRR(this, kArm64Sxth, node);
+}
+
+void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
+ VisitRR(this, kArm64Sxtw, node);
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
@@ -3148,7 +3161,8 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kInt32DivIsSafe |
MachineOperatorBuilder::kUint32DivIsSafe |
MachineOperatorBuilder::kWord32ReverseBits |
- MachineOperatorBuilder::kWord64ReverseBits;
+ MachineOperatorBuilder::kWord64ReverseBits |
+ MachineOperatorBuilder::kSpeculationFence;
}
// static
@@ -3158,6 +3172,9 @@ InstructionSelector::AlignmentRequirements() {
FullUnalignedAccessSupport();
}
+// static
+bool InstructionSelector::SupportsSpeculationPoisoning() { return true; }
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.h b/deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.h
index a532851d84..25c4fcf77f 100644
--- a/deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.h
+++ b/deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_ARM64_UNWINDING_INFO_WRITER_H_
-#define V8_COMPILER_ARM64_UNWINDING_INFO_WRITER_H_
+#ifndef V8_COMPILER_ARM64_UNWINDING_INFO_WRITER_ARM64_H_
+#define V8_COMPILER_ARM64_UNWINDING_INFO_WRITER_ARM64_H_
#include "src/eh-frame.h"
@@ -69,4 +69,4 @@ class UnwindingInfoWriter {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_COMPILER_ARM64_UNWINDING_INFO_WRITER_ARM64_H_
diff --git a/deps/v8/src/compiler/basic-block-instrumentor.h b/deps/v8/src/compiler/basic-block-instrumentor.h
index 074f19b308..3a5b729966 100644
--- a/deps/v8/src/compiler/basic-block-instrumentor.h
+++ b/deps/v8/src/compiler/basic-block-instrumentor.h
@@ -29,4 +29,4 @@ class BasicBlockInstrumentor : public AllStatic {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_COMPILER_BASIC_BLOCK_INSTRUMENTOR_H_
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
index 53c3435b55..3d71e98a12 100644
--- a/deps/v8/src/compiler/branch-elimination.cc
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -16,7 +16,8 @@ BranchElimination::BranchElimination(Editor* editor, JSGraph* js_graph,
Zone* zone)
: AdvancedReducer(editor),
jsgraph_(js_graph),
- node_conditions_(zone, js_graph->graph()->NodeCount()),
+ node_conditions_(js_graph->graph()->NodeCount(), zone),
+ reduced_(js_graph->graph()->NodeCount(), zone),
zone_(zone),
dead_(js_graph->Dead()) {}
@@ -55,26 +56,32 @@ Reduction BranchElimination::Reduce(Node* node) {
Reduction BranchElimination::ReduceBranch(Node* node) {
Node* condition = node->InputAt(0);
Node* control_input = NodeProperties::GetControlInput(node, 0);
- const ControlPathConditions* from_input = node_conditions_.Get(control_input);
- if (from_input != nullptr) {
- Maybe<bool> condition_value = from_input->LookupCondition(condition);
- // If we know the condition we can discard the branch.
- if (condition_value.IsJust()) {
- bool known_value = condition_value.FromJust();
- for (Node* const use : node->uses()) {
- switch (use->opcode()) {
- case IrOpcode::kIfTrue:
- Replace(use, known_value ? control_input : dead());
- break;
- case IrOpcode::kIfFalse:
- Replace(use, known_value ? dead() : control_input);
- break;
- default:
- UNREACHABLE();
- }
+ ControlPathConditions from_input = node_conditions_.Get(control_input);
+ Node* branch;
+ bool condition_value;
+ // If we know the condition we can discard the branch.
+ if (from_input.LookupCondition(condition, &branch, &condition_value)) {
+ // Mark the branch as a safety check.
+ // Check if {branch} is dead because we might have a stale side-table entry.
+ if (IsSafetyCheckOf(node->op()) == IsSafetyCheck::kSafetyCheck &&
+ !branch->IsDead()) {
+ NodeProperties::ChangeOp(branch,
+ common()->MarkAsSafetyCheck(branch->op()));
+ }
+
+ for (Node* const use : node->uses()) {
+ switch (use->opcode()) {
+ case IrOpcode::kIfTrue:
+ Replace(use, condition_value ? control_input : dead());
+ break;
+ case IrOpcode::kIfFalse:
+ Replace(use, condition_value ? dead() : control_input);
+ break;
+ default:
+ UNREACHABLE();
}
- return Replace(dead());
}
+ return Replace(dead());
}
return TakeConditionsFromFirstControl(node);
}
@@ -88,45 +95,53 @@ Reduction BranchElimination::ReduceDeoptimizeConditional(Node* node) {
Node* frame_state = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- ControlPathConditions const* conditions = node_conditions_.Get(control);
// If we do not know anything about the predecessor, do not propagate just
// yet because we will have to recompute anyway once we compute the
// predecessor.
- if (conditions == nullptr) {
- return UpdateConditions(node, conditions);
- }
- Maybe<bool> condition_value = conditions->LookupCondition(condition);
- if (condition_value.IsJust()) {
+ if (!reduced_.Get(control)) {
+ return NoChange();
+ }
+
+ ControlPathConditions conditions = node_conditions_.Get(control);
+ bool condition_value;
+ Node* branch;
+ if (conditions.LookupCondition(condition, &branch, &condition_value)) {
+ // Mark the branch as a safety check.
+ if (p.is_safety_check() == IsSafetyCheck::kSafetyCheck) {
+ NodeProperties::ChangeOp(branch,
+ common()->MarkAsSafetyCheck(branch->op()));
+ }
+
// If we know the condition we can discard the branch.
- if (condition_is_true == condition_value.FromJust()) {
+ if (condition_is_true == condition_value) {
// We don't update the conditions here, because we're replacing {node}
// with the {control} node that already contains the right information.
ReplaceWithValue(node, dead(), effect, control);
} else {
control = graph()->NewNode(
- common()->Deoptimize(p.kind(), p.reason(), VectorSlotPair()),
- frame_state, effect, control);
+ common()->Deoptimize(p.kind(), p.reason(), p.feedback()), frame_state,
+ effect, control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), control);
Revisit(graph()->end());
}
return Replace(dead());
}
- return UpdateConditions(node, conditions, condition, condition_is_true);
+ return UpdateConditions(node, conditions, condition, node, condition_is_true);
}
Reduction BranchElimination::ReduceIf(Node* node, bool is_true_branch) {
// Add the condition to the list arriving from the input branch.
Node* branch = NodeProperties::GetControlInput(node, 0);
- const ControlPathConditions* from_branch = node_conditions_.Get(branch);
+ ControlPathConditions from_branch = node_conditions_.Get(branch);
// If we do not know anything about the predecessor, do not propagate just
// yet because we will have to recompute anyway once we compute the
// predecessor.
- if (from_branch == nullptr) {
- return UpdateConditions(node, nullptr);
+ if (!reduced_.Get(branch)) {
+ return NoChange();
}
Node* condition = branch->InputAt(0);
- return UpdateConditions(node, from_branch, condition, is_true_branch);
+ return UpdateConditions(node, from_branch, condition, branch, is_true_branch);
}
@@ -143,8 +158,8 @@ Reduction BranchElimination::ReduceMerge(Node* node) {
// input.
Node::Inputs inputs = node->inputs();
for (Node* input : inputs) {
- if (node_conditions_.Get(input) == nullptr) {
- return UpdateConditions(node, nullptr);
+ if (!reduced_.Get(input)) {
+ return NoChange();
}
}
@@ -152,42 +167,23 @@ Reduction BranchElimination::ReduceMerge(Node* node) {
DCHECK_GT(inputs.count(), 0);
- const ControlPathConditions* first = node_conditions_.Get(*input_it);
+ ControlPathConditions conditions = node_conditions_.Get(*input_it);
++input_it;
- // Make a copy of the first input's conditions and merge with the conditions
- // from other inputs.
- ControlPathConditions* conditions =
- new (zone_->New(sizeof(ControlPathConditions)))
- ControlPathConditions(*first);
+ // Merge the first input's conditions with the conditions from the other
+ // inputs.
auto input_end = inputs.end();
for (; input_it != input_end; ++input_it) {
- conditions->Merge(*(node_conditions_.Get(*input_it)));
+ // Change the current condition list to a longest common tail
+ // of this condition list and the other list. (The common tail
+ // should correspond to the list from the common dominator.)
+ conditions.ResetToCommonAncestor(node_conditions_.Get(*input_it));
}
-
return UpdateConditions(node, conditions);
}
Reduction BranchElimination::ReduceStart(Node* node) {
- return UpdateConditions(node, ControlPathConditions::Empty(zone_));
-}
-
-const BranchElimination::ControlPathConditions*
-BranchElimination::PathConditionsForControlNodes::Get(Node* node) const {
- if (static_cast<size_t>(node->id()) < info_for_node_.size()) {
- return info_for_node_[node->id()];
- }
- return nullptr;
-}
-
-
-void BranchElimination::PathConditionsForControlNodes::Set(
- Node* node, const ControlPathConditions* conditions) {
- size_t index = static_cast<size_t>(node->id());
- if (index >= info_for_node_.size()) {
- info_for_node_.resize(index + 1, nullptr);
- }
- info_for_node_[index] = conditions;
+ return UpdateConditions(node, {});
}
@@ -200,157 +196,58 @@ Reduction BranchElimination::ReduceOtherControl(Node* node) {
Reduction BranchElimination::TakeConditionsFromFirstControl(Node* node) {
// We just propagate the information from the control input (ideally,
// we would only revisit control uses if there is change).
- const ControlPathConditions* from_input =
- node_conditions_.Get(NodeProperties::GetControlInput(node, 0));
- return UpdateConditions(node, from_input);
+ Node* input = NodeProperties::GetControlInput(node, 0);
+ if (!reduced_.Get(input)) return NoChange();
+ return UpdateConditions(node, node_conditions_.Get(input));
}
-
Reduction BranchElimination::UpdateConditions(
- Node* node, const ControlPathConditions* conditions) {
- const ControlPathConditions* original = node_conditions_.Get(node);
+ Node* node, ControlPathConditions conditions) {
// Only signal that the node has Changed if the condition information has
// changed.
- if (conditions != original) {
- if (conditions == nullptr || original == nullptr ||
- *conditions != *original) {
- node_conditions_.Set(node, conditions);
- return Changed(node);
- }
+ if (reduced_.Set(node, true) | node_conditions_.Set(node, conditions)) {
+ return Changed(node);
}
return NoChange();
}
Reduction BranchElimination::UpdateConditions(
- Node* node, const ControlPathConditions* prev_conditions,
- Node* current_condition, bool is_true_branch) {
- const ControlPathConditions* original = node_conditions_.Get(node);
- DCHECK(prev_conditions != nullptr && current_condition != nullptr);
+ Node* node, ControlPathConditions prev_conditions, Node* current_condition,
+ Node* current_branch, bool is_true_branch) {
+ ControlPathConditions original = node_conditions_.Get(node);
// The control path for the node is the path obtained by appending the
- // current_condition to the prev_conditions. Check if this new control path
- // would be the same as the already recorded path (original).
- if (original == nullptr || !prev_conditions->EqualsAfterAddingCondition(
- original, current_condition, is_true_branch)) {
- // If this is the first visit or if the control path is different from the
- // recorded path create the new control path and record it.
- const ControlPathConditions* new_condition =
- prev_conditions->AddCondition(zone_, current_condition, is_true_branch);
- node_conditions_.Set(node, new_condition);
- return Changed(node);
- }
- return NoChange();
-}
-
-// static
-const BranchElimination::ControlPathConditions*
-BranchElimination::ControlPathConditions::Empty(Zone* zone) {
- return new (zone->New(sizeof(ControlPathConditions)))
- ControlPathConditions(nullptr, 0);
-}
-
-
-void BranchElimination::ControlPathConditions::Merge(
- const ControlPathConditions& other) {
- // Change the current condition list to a longest common tail
- // of this condition list and the other list. (The common tail
- // should correspond to the list from the common dominator.)
-
- // First, we throw away the prefix of the longer list, so that
- // we have lists of the same length.
- size_t other_size = other.condition_count_;
- BranchCondition* other_condition = other.head_;
- while (other_size > condition_count_) {
- other_condition = other_condition->next;
- other_size--;
- }
- while (condition_count_ > other_size) {
- head_ = head_->next;
- condition_count_--;
- }
-
- // Then we go through both lists in lock-step until we find
- // the common tail.
- while (head_ != other_condition) {
- DCHECK_LT(0, condition_count_);
- condition_count_--;
- other_condition = other_condition->next;
- head_ = head_->next;
- }
-}
-
-
-const BranchElimination::ControlPathConditions*
-BranchElimination::ControlPathConditions::AddCondition(Zone* zone,
- Node* condition,
- bool is_true) const {
- DCHECK(LookupCondition(condition).IsNothing());
-
- BranchCondition* new_head = new (zone->New(sizeof(BranchCondition)))
- BranchCondition(condition, is_true, head_);
-
- ControlPathConditions* conditions =
- new (zone->New(sizeof(ControlPathConditions)))
- ControlPathConditions(new_head, condition_count_ + 1);
- return conditions;
-}
-
-
-Maybe<bool> BranchElimination::ControlPathConditions::LookupCondition(
- Node* condition) const {
- for (BranchCondition* current = head_; current != nullptr;
- current = current->next) {
- if (current->condition == condition) {
- return Just<bool>(current->is_true);
+ // current_condition to the prev_conditions. Use the original control path as
+ // a hint to avoid allocations.
+ prev_conditions.AddCondition(zone_, current_condition, current_branch,
+ is_true_branch, original);
+ return UpdateConditions(node, prev_conditions);
+}
+
+void BranchElimination::ControlPathConditions::AddCondition(
+ Zone* zone, Node* condition, Node* branch, bool is_true,
+ ControlPathConditions hint) {
+ DCHECK_EQ(false, LookupCondition(condition, nullptr, nullptr));
+ PushFront({condition, branch, is_true}, zone, hint);
+}
+
+bool BranchElimination::ControlPathConditions::LookupCondition(
+ Node* condition, Node** branch, bool* is_true) const {
+ for (BranchCondition element : *this) {
+ if (element.condition == condition) {
+ *is_true = element.is_true;
+ *branch = element.branch;
+ return true;
}
}
- return Nothing<bool>();
-}
-
-bool BranchElimination::ControlPathConditions::IsSamePath(
- BranchCondition* this_condition, BranchCondition* other_condition) const {
- while (true) {
- if (this_condition == other_condition) return true;
- if (this_condition->condition != other_condition->condition ||
- this_condition->is_true != other_condition->is_true) {
- return false;
- }
- this_condition = this_condition->next;
- other_condition = other_condition->next;
+ return false;
}
- UNREACHABLE();
-}
-bool BranchElimination::ControlPathConditions::operator==(
- const ControlPathConditions& other) const {
- if (condition_count_ != other.condition_count_) return false;
- return IsSamePath(head_, other.head_);
-}
+ Graph* BranchElimination::graph() const { return jsgraph()->graph(); }
-bool BranchElimination::ControlPathConditions::EqualsAfterAddingCondition(
- const ControlPathConditions* other, const Node* new_condition,
- bool new_branch_direction) const {
- // When an extra condition is added to the current chain, the count of
- // the resulting chain would increase by 1. Quick check to see if counts
- // match.
- if (other->condition_count_ != condition_count_ + 1) return false;
-
- // Check if the head of the other chain is same as the new condition that
- // would be added.
- if (other->head_->condition != new_condition ||
- other->head_->is_true != new_branch_direction) {
- return false;
+ CommonOperatorBuilder* BranchElimination::common() const {
+ return jsgraph()->common();
}
- // Check if the rest of the path is the same as the prev_condition.
- return IsSamePath(other->head_->next, head_);
-}
-
-Graph* BranchElimination::graph() const { return jsgraph()->graph(); }
-
-CommonOperatorBuilder* BranchElimination::common() const {
- return jsgraph()->common();
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
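
The Merge() routine removed above walks two condition lists down to their longest common tail, i.e. the conditions already known at the common dominator; the rewritten pass gets the same effect from ResetToCommonAncestor on a functional (shared-tail) list passed by value. The stand-alone sketch below shows that common-tail operation; the types and names are illustrative only and are not V8's FunctionalList API, but they assume the same invariant that lists built from a common dominator share their tail nodes.

// Illustrative sketch: a persistent list of (condition, direction) pairs in
// which tails are shared between lists, plus the longest-common-tail
// operation used when control paths merge. Not V8's FunctionalList.
#include <cassert>
#include <cstddef>
#include <memory>

struct CondNode {
  int condition;                         // stand-in for the condition Node*
  bool is_true;                          // branch direction on this path
  std::shared_ptr<const CondNode> next;  // shared tail
};

class CondList {
 public:
  void PushFront(int condition, bool is_true) {
    head_ = std::make_shared<const CondNode>(CondNode{condition, is_true, head_});
    ++size_;
  }

  // Keep only the longest tail this list shares with {other}; that tail holds
  // the conditions established at the common dominator.
  void ResetToCommonAncestor(CondList other) {
    while (other.size_ > size_) other.Pop();
    while (size_ > other.size_) Pop();
    while (head_ != other.head_) {
      Pop();
      other.Pop();
    }
  }

  std::size_t size() const { return size_; }

 private:
  void Pop() {
    assert(head_ != nullptr);
    head_ = head_->next;
    --size_;
  }

  std::shared_ptr<const CondNode> head_;
  std::size_t size_ = 0;
};

int main() {
  CondList dominator;
  dominator.PushFront(/*condition=*/1, /*is_true=*/true);

  CondList then_path = dominator;  // true successor: shares the dominator tail
  then_path.PushFront(2, true);
  CondList else_path = dominator;  // false successor: shares the same tail
  else_path.PushFront(2, false);

  then_path.ResetToCommonAncestor(else_path);
  assert(then_path.size() == 1);  // only the dominator's condition survives
  return 0;
}

Because tails are shared, the lock-step walk stops exactly at the sublist inherited from the common dominator, so merging at a Merge node needs no allocation and no per-element comparison of conditions.
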
diff --git a/deps/v8/src/compiler/branch-elimination.h b/deps/v8/src/compiler/branch-elimination.h
index d78933e734..de3b9e5b2e 100644
--- a/deps/v8/src/compiler/branch-elimination.h
+++ b/deps/v8/src/compiler/branch-elimination.h
@@ -2,11 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_BRANCH_CONDITION_ELIMINATION_H_
-#define V8_COMPILER_BRANCH_CONDITION_ELIMINATION_H_
+#ifndef V8_COMPILER_BRANCH_ELIMINATION_H_
+#define V8_COMPILER_BRANCH_ELIMINATION_H_
#include "src/base/compiler-specific.h"
+#include "src/compiler/functional-list.h"
#include "src/compiler/graph-reducer.h"
+#include "src/compiler/node-aux-data.h"
#include "src/globals.h"
namespace v8 {
@@ -30,56 +32,27 @@ class V8_EXPORT_PRIVATE BranchElimination final
private:
struct BranchCondition {
Node* condition;
+ Node* branch;
bool is_true;
- BranchCondition* next;
- BranchCondition(Node* condition, bool is_true, BranchCondition* next)
- : condition(condition), is_true(is_true), next(next) {}
+ bool operator==(BranchCondition other) const {
+ return condition == other.condition && branch == other.branch &&
+ is_true == other.is_true;
+ }
+ bool operator!=(BranchCondition other) const { return !(*this == other); }
};
// Class for tracking information about branch conditions.
// At the moment it is a linked list of conditions and their values
// (true or false).
- class ControlPathConditions {
+ class ControlPathConditions : public FunctionalList<BranchCondition> {
public:
- Maybe<bool> LookupCondition(Node* condition) const;
-
- const ControlPathConditions* AddCondition(Zone* zone, Node* condition,
- bool is_true) const;
- static const ControlPathConditions* Empty(Zone* zone);
- void Merge(const ControlPathConditions& other);
-
- bool IsSamePath(BranchCondition* first, BranchCondition* second) const;
- bool EqualsAfterAddingCondition(const ControlPathConditions* other,
- const Node* new_condition,
- bool new_branch_condition) const;
- bool operator==(const ControlPathConditions& other) const;
- bool operator!=(const ControlPathConditions& other) const {
- return !(*this == other);
- }
+ bool LookupCondition(Node* condition, Node** branch, bool* is_true) const;
+ void AddCondition(Zone* zone, Node* condition, Node* branch, bool is_true,
+ ControlPathConditions hint);
private:
- ControlPathConditions(BranchCondition* head, size_t condition_count)
- : head_(head), condition_count_(condition_count) {}
-
- BranchCondition* head_;
- // We keep track of the list length so that we can find the longest
- // common tail easily.
- size_t condition_count_;
- };
-
- // Maps each control node to the condition information known about the node.
- // If the information is nullptr, then we have not calculated the information
- // yet.
- class PathConditionsForControlNodes {
- public:
- PathConditionsForControlNodes(Zone* zone, size_t size_hint)
- : info_for_node_(size_hint, nullptr, zone) {}
- const ControlPathConditions* Get(Node* node) const;
- void Set(Node* node, const ControlPathConditions* conditions);
-
- private:
- ZoneVector<const ControlPathConditions*> info_for_node_;
+ using FunctionalList<BranchCondition>::PushFront;
};
Reduction ReduceBranch(Node* node);
@@ -91,11 +64,10 @@ class V8_EXPORT_PRIVATE BranchElimination final
Reduction ReduceOtherControl(Node* node);
Reduction TakeConditionsFromFirstControl(Node* node);
- Reduction UpdateConditions(Node* node,
- const ControlPathConditions* conditions);
- Reduction UpdateConditions(Node* node,
- const ControlPathConditions* prev_conditions,
- Node* current_condition, bool is_true_branch);
+ Reduction UpdateConditions(Node* node, ControlPathConditions conditions);
+ Reduction UpdateConditions(Node* node, ControlPathConditions prev_conditions,
+ Node* current_condition, Node* current_branch,
+ bool is_true_branch);
Node* dead() const { return dead_; }
Graph* graph() const;
@@ -103,7 +75,12 @@ class V8_EXPORT_PRIVATE BranchElimination final
CommonOperatorBuilder* common() const;
JSGraph* const jsgraph_;
- PathConditionsForControlNodes node_conditions_;
+
+ // Maps each control node to the condition information known about the node.
+  // If the corresponding {reduced_} flag is false, then we have not
+  // calculated the information yet.
+ NodeAuxData<ControlPathConditions> node_conditions_;
+ NodeAuxData<bool> reduced_;
Zone* zone_;
Node* dead_;
};
@@ -112,4 +89,4 @@ class V8_EXPORT_PRIVATE BranchElimination final
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_BRANCH_CONDITION_ELIMINATION_H_
+#endif // V8_COMPILER_BRANCH_ELIMINATION_H_
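
The header now keeps ControlPathConditions by value in a NodeAuxData side table and pairs it with a second NodeAuxData<bool> reduced_ table marking which nodes have been processed; UpdateConditions relies on Set() reporting whether the stored value actually changed to decide between Changed(node) and NoChange(). A minimal sketch of that kind of id-indexed side table follows; it is illustrative only and does not reproduce V8's NodeAuxData interface.

// Illustrative per-node side table: values are stored densely by node id and
// Set() reports whether the stored value changed, which is what a reducer can
// use to decide whether dependent nodes need revisiting. Not V8's NodeAuxData.
#include <cassert>
#include <cstddef>
#include <vector>

template <typename T>
class AuxData {
 public:
  explicit AuxData(T default_value = T()) : default_(default_value) {}

  T Get(std::size_t id) const {
    return id < data_.size() ? data_[id] : default_;
  }

  // Returns true if the stored value actually changed.
  bool Set(std::size_t id, const T& value) {
    if (id >= data_.size()) data_.resize(id + 1, default_);
    if (data_[id] == value) return false;
    data_[id] = value;
    return true;
  }

 private:
  T default_;
  std::vector<T> data_;
};

int main() {
  AuxData<bool> reduced;
  assert(reduced.Set(5, true));     // first write: the value changed
  assert(!reduced.Set(5, true));    // same value again: no change to report
  assert(reduced.Get(7) == false);  // untouched ids report the default
  return 0;
}
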
diff --git a/deps/v8/src/compiler/bytecode-analysis.cc b/deps/v8/src/compiler/bytecode-analysis.cc
index 4ee30bcdf2..980869ccd3 100644
--- a/deps/v8/src/compiler/bytecode-analysis.cc
+++ b/deps/v8/src/compiler/bytecode-analysis.cc
@@ -61,6 +61,22 @@ bool BytecodeLoopAssignments::ContainsLocal(int index) const {
return bit_vector_->Contains(parameter_count_ + index);
}
+ResumeJumpTarget::ResumeJumpTarget(int suspend_id, int target_offset,
+ int final_target_offset)
+ : suspend_id_(suspend_id),
+ target_offset_(target_offset),
+ final_target_offset_(final_target_offset) {}
+
+ResumeJumpTarget ResumeJumpTarget::Leaf(int suspend_id, int target_offset) {
+ return ResumeJumpTarget(suspend_id, target_offset, target_offset);
+}
+
+ResumeJumpTarget ResumeJumpTarget::AtLoopHeader(int loop_header_offset,
+ const ResumeJumpTarget& next) {
+ return ResumeJumpTarget(next.suspend_id(), loop_header_offset,
+ next.target_offset());
+}
+
BytecodeAnalysis::BytecodeAnalysis(Handle<BytecodeArray> bytecode_array,
Zone* zone, bool do_liveness_analysis)
: bytecode_array_(bytecode_array),
@@ -68,6 +84,7 @@ BytecodeAnalysis::BytecodeAnalysis(Handle<BytecodeArray> bytecode_array,
zone_(zone),
loop_stack_(zone),
loop_end_index_queue_(zone),
+ resume_jump_targets_(zone),
end_to_header_(zone),
header_to_info_(zone),
osr_entry_point_(-1),
@@ -80,6 +97,21 @@ void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState& in_liveness,
int num_operands = Bytecodes::NumberOfOperands(bytecode);
const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
+ // Special case Suspend and Resume to just pass through liveness.
+ if (bytecode == Bytecode::kSuspendGenerator) {
+ // The generator object has to be live.
+ in_liveness.MarkRegisterLive(accessor.GetRegisterOperand(0).index());
+ // Suspend additionally reads and returns the accumulator
+ DCHECK(Bytecodes::ReadsAccumulator(bytecode));
+ in_liveness.MarkAccumulatorLive();
+ return;
+ }
+ if (bytecode == Bytecode::kResumeGenerator) {
+ // The generator object has to be live.
+ in_liveness.MarkRegisterLive(accessor.GetRegisterOperand(0).index());
+ return;
+ }
+
if (Bytecodes::WritesAccumulator(bytecode)) {
in_liveness.MarkAccumulatorDead();
}
@@ -175,6 +207,13 @@ void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState& out_liveness,
int current_offset = accessor.current_offset();
const Handle<BytecodeArray>& bytecode_array = accessor.bytecode_array();
+ // Special case Suspend and Resume to just pass through liveness.
+ if (bytecode == Bytecode::kSuspendGenerator ||
+ bytecode == Bytecode::kResumeGenerator) {
+ out_liveness.Union(*next_bytecode_in_liveness);
+ return;
+ }
+
// Update from jump target (if any). Skip loops, we update these manually in
// the liveness iterations.
if (Bytecodes::IsForwardJump(bytecode)) {
@@ -197,9 +236,9 @@ void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState& out_liveness,
if (!interpreter::Bytecodes::IsWithoutExternalSideEffects(bytecode)) {
int handler_context;
// TODO(leszeks): We should look up this range only once per entry.
- HandlerTable* table = HandlerTable::cast(bytecode_array->handler_table());
+ HandlerTable table(*bytecode_array);
int handler_offset =
- table->LookupRange(current_offset, &handler_context, nullptr);
+ table.LookupRange(current_offset, &handler_context, nullptr);
if (handler_offset != -1) {
bool was_accumulator_live = out_liveness.AccumulatorIsLive();
@@ -221,6 +260,18 @@ void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState& out_liveness,
}
}
+void UpdateLiveness(Bytecode bytecode, BytecodeLiveness& liveness,
+ BytecodeLivenessState** next_bytecode_in_liveness,
+ const interpreter::BytecodeArrayAccessor& accessor,
+ const BytecodeLivenessMap& liveness_map) {
+ UpdateOutLiveness(bytecode, *liveness.out, *next_bytecode_in_liveness,
+ accessor, liveness_map);
+ liveness.in->CopyFrom(*liveness.out);
+ UpdateInLiveness(bytecode, *liveness.in, accessor);
+
+ *next_bytecode_in_liveness = liveness.in;
+}
+
void UpdateAssignments(Bytecode bytecode, BytecodeLoopAssignments& assignments,
const interpreter::BytecodeArrayAccessor& accessor) {
int num_operands = Bytecodes::NumberOfOperands(bytecode);
@@ -260,14 +311,21 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
BytecodeLivenessState* next_bytecode_in_liveness = nullptr;
- int osr_loop_end_offset =
- osr_bailout_id.IsNone() ? -1 : osr_bailout_id.ToInt();
+ bool is_osr = !osr_bailout_id.IsNone();
+ int osr_loop_end_offset = is_osr ? osr_bailout_id.ToInt() : -1;
+
+ int generator_switch_index = -1;
interpreter::BytecodeArrayRandomIterator iterator(bytecode_array(), zone());
for (iterator.GoToEnd(); iterator.IsValid(); --iterator) {
Bytecode bytecode = iterator.current_bytecode();
int current_offset = iterator.current_offset();
+ if (bytecode == Bytecode::kSwitchOnGeneratorState) {
+ DCHECK_EQ(generator_switch_index, -1);
+ generator_switch_index = iterator.current_index();
+ }
+
if (bytecode == Bytecode::kJumpLoop) {
// Every byte up to and including the last byte within the backwards jump
// instruction is considered part of the loop, set loop end accordingly.
@@ -298,32 +356,84 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
// information we currently have.
UpdateAssignments(bytecode, current_loop_info->assignments(), iterator);
+ // Update suspend counts for this loop, though only if not OSR.
+ if (!is_osr && bytecode == Bytecode::kSuspendGenerator) {
+ int suspend_id = iterator.GetUnsignedImmediateOperand(3);
+ int resume_offset = current_offset + iterator.current_bytecode_size();
+ current_loop_info->AddResumeTarget(
+ ResumeJumpTarget::Leaf(suspend_id, resume_offset));
+ }
+
+ // If we've reached the header of the loop, pop it off the stack.
if (current_offset == current_loop.header_offset) {
loop_stack_.pop();
if (loop_stack_.size() > 1) {
- // Propagate inner loop assignments to outer loop.
- loop_stack_.top().loop_info->assignments().Union(
+ // If there is still an outer loop, propagate inner loop assignments.
+ LoopInfo* parent_loop_info = loop_stack_.top().loop_info;
+
+ parent_loop_info->assignments().Union(
current_loop_info->assignments());
+
+ // Also, propagate resume targets. Instead of jumping to the target
+ // itself, the outer loop will jump to this loop header for any
+ // targets that are inside the current loop, so that this loop stays
+ // reducible. Hence, a nested loop of the form:
+ //
+ // switch (#1 -> suspend1, #2 -> suspend2)
+ // loop {
+ // suspend1: suspend #1
+ // loop {
+ // suspend2: suspend #2
+ // }
+ // }
+ //
+ // becomes:
+ //
+ // switch (#1 -> loop1, #2 -> loop1)
+ // loop1: loop {
+ // switch (#1 -> suspend1, #2 -> loop2)
+ // suspend1: suspend #1
+ // loop2: loop {
+ // switch (#2 -> suspend2)
+ // suspend2: suspend #2
+ // }
+ // }
+ for (const auto& target : current_loop_info->resume_jump_targets()) {
+ parent_loop_info->AddResumeTarget(
+ ResumeJumpTarget::AtLoopHeader(current_offset, target));
+ }
+
+ } else {
+ // Otherwise, just propagate inner loop suspends to top-level.
+ for (const auto& target : current_loop_info->resume_jump_targets()) {
+ resume_jump_targets_.push_back(
+ ResumeJumpTarget::AtLoopHeader(current_offset, target));
+ }
}
}
+ } else if (!is_osr && bytecode == Bytecode::kSuspendGenerator) {
+ // If we're not in a loop, we still need to look for suspends.
+ // TODO(leszeks): It would be nice to de-duplicate this with the in-loop
+ // case
+ int suspend_id = iterator.GetUnsignedImmediateOperand(3);
+ int resume_offset = current_offset + iterator.current_bytecode_size();
+ resume_jump_targets_.push_back(
+ ResumeJumpTarget::Leaf(suspend_id, resume_offset));
}
if (do_liveness_analysis_) {
BytecodeLiveness& liveness = liveness_map_.InitializeLiveness(
current_offset, bytecode_array()->register_count(), zone());
-
- UpdateOutLiveness(bytecode, *liveness.out, next_bytecode_in_liveness,
- iterator, liveness_map_);
- liveness.in->CopyFrom(*liveness.out);
- UpdateInLiveness(bytecode, *liveness.in, iterator);
-
- next_bytecode_in_liveness = liveness.in;
+ UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator,
+ liveness_map_);
}
}
DCHECK_EQ(loop_stack_.size(), 1u);
DCHECK_EQ(loop_stack_.top().header_offset, -1);
+ DCHECK(ResumeJumpTargetsAreValid());
+
if (!do_liveness_analysis_) return;
// At this point, every bytecode has a valid in and out liveness, except for
@@ -374,16 +484,11 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
--iterator;
for (; iterator.current_offset() > header_offset; --iterator) {
Bytecode bytecode = iterator.current_bytecode();
-
int current_offset = iterator.current_offset();
BytecodeLiveness& liveness = liveness_map_.GetLiveness(current_offset);
- UpdateOutLiveness(bytecode, *liveness.out, next_bytecode_in_liveness,
- iterator, liveness_map_);
- liveness.in->CopyFrom(*liveness.out);
- UpdateInLiveness(bytecode, *liveness.in, iterator);
-
- next_bytecode_in_liveness = liveness.in;
+ UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator,
+ liveness_map_);
}
// Now we are at the loop header. Since the in-liveness of the header
// can't change, we need only to update the out-liveness.
@@ -391,6 +496,47 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
next_bytecode_in_liveness, iterator, liveness_map_);
}
+ // Process the generator switch statement separately, once the loops are done.
+ // This has to be a separate pass because the generator switch can jump into
+ // the middle of loops (and is the only kind of jump that can jump across a
+ // loop header).
+ if (generator_switch_index != -1) {
+ iterator.GoToIndex(generator_switch_index);
+ DCHECK_EQ(iterator.current_bytecode(), Bytecode::kSwitchOnGeneratorState);
+
+ int current_offset = iterator.current_offset();
+ BytecodeLiveness& switch_liveness =
+ liveness_map_.GetLiveness(current_offset);
+
+ bool any_changed = false;
+ for (const auto& entry : iterator.GetJumpTableTargetOffsets()) {
+ if (switch_liveness.out->UnionIsChanged(
+ *liveness_map_.GetInLiveness(entry.target_offset))) {
+ any_changed = true;
+ }
+ }
+
+ // If the switch liveness changed, we have to propagate it up the remaining
+ // bytecodes before it.
+ if (any_changed) {
+ switch_liveness.in->CopyFrom(*switch_liveness.out);
+ UpdateInLiveness(Bytecode::kSwitchOnGeneratorState, *switch_liveness.in,
+ iterator);
+ next_bytecode_in_liveness = switch_liveness.in;
+ for (--iterator; iterator.IsValid(); --iterator) {
+ Bytecode bytecode = iterator.current_bytecode();
+ int current_offset = iterator.current_offset();
+ BytecodeLiveness& liveness = liveness_map_.GetLiveness(current_offset);
+
+ // There shouldn't be any more loops.
+ DCHECK_NE(bytecode, Bytecode::kJumpLoop);
+
+ UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator,
+ liveness_map_);
+ }
+ }
+ }
+
DCHECK(LivenessIsValid());
}
@@ -497,6 +643,154 @@ std::ostream& BytecodeAnalysis::PrintLivenessTo(std::ostream& os) const {
}
#if DEBUG
+bool BytecodeAnalysis::ResumeJumpTargetsAreValid() {
+ bool valid = true;
+
+ // Find the generator switch.
+ interpreter::BytecodeArrayRandomIterator iterator(bytecode_array(), zone());
+ for (iterator.GoToStart(); iterator.IsValid(); ++iterator) {
+ if (iterator.current_bytecode() == Bytecode::kSwitchOnGeneratorState) {
+ break;
+ }
+ }
+
+ // If the iterator is invalid, we've reached the end without finding the
+ // generator switch. Similarly, if we are OSR-ing, we're not resuming, so we
+ // need no jump targets. So, ensure there are no jump targets and exit.
+ if (!iterator.IsValid() || HasOsrEntryPoint()) {
+ // Check top-level.
+ if (!resume_jump_targets().empty()) {
+ PrintF(stderr,
+ "Found %zu top-level resume targets but no resume switch\n",
+ resume_jump_targets().size());
+ valid = false;
+ }
+ // Check loops.
+ for (const std::pair<int, LoopInfo>& loop_info : header_to_info_) {
+ if (!loop_info.second.resume_jump_targets().empty()) {
+ PrintF(stderr,
+ "Found %zu resume targets at loop at offset %d, but no resume "
+ "switch\n",
+ loop_info.second.resume_jump_targets().size(), loop_info.first);
+ valid = false;
+ }
+ }
+
+ return valid;
+ }
+
+ // Otherwise, we've found the resume switch. Check that the top level jumps
+ // only to leaves and loop headers, then check that each loop header handles
+ // all the unresolved jumps, also jumping only to leaves and inner loop
+ // headers.
+
+ // First collect all required suspend ids.
+ std::map<int, int> unresolved_suspend_ids;
+ for (const interpreter::JumpTableTargetOffset& offset :
+ iterator.GetJumpTableTargetOffsets()) {
+ int suspend_id = offset.case_value;
+ int resume_offset = offset.target_offset;
+
+ unresolved_suspend_ids[suspend_id] = resume_offset;
+ }
+
+ // Check top-level.
+ if (!ResumeJumpTargetLeavesResolveSuspendIds(-1, resume_jump_targets(),
+ &unresolved_suspend_ids)) {
+ valid = false;
+ }
+ // Check loops.
+ for (const std::pair<int, LoopInfo>& loop_info : header_to_info_) {
+ if (!ResumeJumpTargetLeavesResolveSuspendIds(
+ loop_info.first, loop_info.second.resume_jump_targets(),
+ &unresolved_suspend_ids)) {
+ valid = false;
+ }
+ }
+
+ // Check that everything is resolved.
+ if (!unresolved_suspend_ids.empty()) {
+ PrintF(stderr,
+ "Found suspend ids that are not resolved by a final leaf resume "
+ "jump:\n");
+
+ for (const std::pair<int, int>& target : unresolved_suspend_ids) {
+ PrintF(stderr, " %d -> %d\n", target.first, target.second);
+ }
+ valid = false;
+ }
+
+ return valid;
+}
+
+bool BytecodeAnalysis::ResumeJumpTargetLeavesResolveSuspendIds(
+ int parent_offset, const ZoneVector<ResumeJumpTarget>& resume_jump_targets,
+ std::map<int, int>* unresolved_suspend_ids) {
+ bool valid = true;
+ for (const ResumeJumpTarget& target : resume_jump_targets) {
+ std::map<int, int>::iterator it =
+ unresolved_suspend_ids->find(target.suspend_id());
+ if (it == unresolved_suspend_ids->end()) {
+ PrintF(
+ stderr,
+ "No unresolved suspend found for resume target with suspend id %d\n",
+ target.suspend_id());
+ valid = false;
+ continue;
+ }
+ int expected_target = it->second;
+
+ if (target.is_leaf()) {
+ // Leaves should have the expected target as their target.
+ if (target.target_offset() != expected_target) {
+ PrintF(
+ stderr,
+ "Expected leaf resume target for id %d to have target offset %d, "
+ "but had %d\n",
+ target.suspend_id(), expected_target, target.target_offset());
+ valid = false;
+ } else {
+ // Make sure we're resuming to a Resume bytecode
+        interpreter::BytecodeArrayAccessor accessor(bytecode_array(),
+                                                     target.target_offset());
+        if (accessor.current_bytecode() != Bytecode::kResumeGenerator) {
+          PrintF(stderr,
+                 "Expected resume target for id %d, offset %d, to be "
+                 "ResumeGenerator, but found %s\n",
+                 target.suspend_id(), target.target_offset(),
+                 Bytecodes::ToString(accessor.current_bytecode()));
+
+ valid = false;
+ }
+ }
+ // We've resolved this suspend id, so erase it to make sure we don't
+ // resolve it twice.
+ unresolved_suspend_ids->erase(it);
+ } else {
+ // Non-leaves should have a direct inner loop header as their target.
+ if (!IsLoopHeader(target.target_offset())) {
+ PrintF(stderr,
+ "Expected non-leaf resume target for id %d to have a loop "
+ "header at target offset %d\n",
+ target.suspend_id(), target.target_offset());
+ valid = false;
+ } else {
+ LoopInfo loop_info = GetLoopInfoFor(target.target_offset());
+ if (loop_info.parent_offset() != parent_offset) {
+ PrintF(stderr,
+ "Expected non-leaf resume target for id %d to have a direct "
+ "inner loop at target offset %d\n",
+ target.suspend_id(), target.target_offset());
+ valid = false;
+ }
+ // If the target loop is a valid inner loop, we'll check its validity
+ // when we analyze its resume targets.
+ }
+ }
+ }
+ return valid;
+}
+
bool BytecodeAnalysis::LivenessIsValid() {
interpreter::BytecodeArrayRandomIterator iterator(bytecode_array(), zone());
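
The UpdateLiveness helper added to bytecode-analysis.cc performs one step of the backward walk: the successor's in-liveness becomes this bytecode's out-liveness, the register the bytecode writes is killed, and the registers it reads are made live. A toy version of that backward pass over straight-line code is sketched below, with plain bitsets standing in for BytecodeLivenessState and a made-up instruction record; it illustrates only the direction of the dataflow, not the real bytecode handling.

// Toy backward liveness walk over straight-line code: each instruction kills
// the register it writes and then makes the registers it reads live. This
// mirrors the shape of the out->in update without any bytecode specifics.
#include <bitset>
#include <cstddef>
#include <iostream>
#include <vector>

constexpr std::size_t kRegs = 8;
using Liveness = std::bitset<kRegs>;

struct Instr {
  int write;               // register written, or -1 for none
  std::vector<int> reads;  // registers read
};

std::vector<Liveness> ComputeInLiveness(const std::vector<Instr>& code) {
  std::vector<Liveness> in(code.size());
  Liveness live;  // nothing is live after the last instruction
  for (std::size_t i = code.size(); i-- > 0;) {
    // The out-liveness of instruction i is the in-liveness of its successor.
    Liveness state = live;
    if (code[i].write >= 0) state.reset(code[i].write);  // kill the write
    for (int r : code[i].reads) state.set(r);            // reads become live
    in[i] = state;
    live = state;
  }
  return in;
}

int main() {
  // r0 = ...; r1 = r0; use(r1)
  std::vector<Instr> code = {{0, {}}, {1, {0}}, {-1, {1}}};
  std::vector<Liveness> in = ComputeInLiveness(code);
  for (std::size_t i = 0; i < in.size(); ++i) {
    std::cout << "in[" << i << "] = " << in[i] << "\n";
  }
  return 0;
}
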
diff --git a/deps/v8/src/compiler/bytecode-analysis.h b/deps/v8/src/compiler/bytecode-analysis.h
index f6cd6e3cab..6ff9ed021a 100644
--- a/deps/v8/src/compiler/bytecode-analysis.h
+++ b/deps/v8/src/compiler/bytecode-analysis.h
@@ -39,15 +39,49 @@ class V8_EXPORT_PRIVATE BytecodeLoopAssignments {
BitVector* bit_vector_;
};
+// Jump targets for resuming a suspended generator.
+class V8_EXPORT_PRIVATE ResumeJumpTarget {
+ public:
+ // Create a resume jump target representing an actual resume.
+ static ResumeJumpTarget Leaf(int suspend_id, int target_offset);
+
+ // Create a resume jump target at a loop header, which will have another
+ // resume jump after the loop header is crossed.
+ static ResumeJumpTarget AtLoopHeader(int loop_header_offset,
+ const ResumeJumpTarget& next);
+
+ int suspend_id() const { return suspend_id_; }
+ int target_offset() const { return target_offset_; }
+ bool is_leaf() const { return target_offset_ == final_target_offset_; }
+
+ private:
+ // The suspend id of the resume.
+ int suspend_id_;
+ // The target offset of this resume jump.
+ int target_offset_;
+ // The final offset of this resume, which may be across multiple jumps.
+ int final_target_offset_;
+
+ ResumeJumpTarget(int suspend_id, int target_offset, int final_target_offset);
+};
+
struct V8_EXPORT_PRIVATE LoopInfo {
public:
LoopInfo(int parent_offset, int parameter_count, int register_count,
Zone* zone)
: parent_offset_(parent_offset),
- assignments_(parameter_count, register_count, zone) {}
+ assignments_(parameter_count, register_count, zone),
+ resume_jump_targets_(zone) {}
int parent_offset() const { return parent_offset_; }
+ const ZoneVector<ResumeJumpTarget>& resume_jump_targets() const {
+ return resume_jump_targets_;
+ }
+ void AddResumeTarget(const ResumeJumpTarget& target) {
+ resume_jump_targets_.push_back(target);
+ }
+
BytecodeLoopAssignments& assignments() { return assignments_; }
const BytecodeLoopAssignments& assignments() const { return assignments_; }
@@ -55,6 +89,7 @@ struct V8_EXPORT_PRIVATE LoopInfo {
// The offset to the parent loop, or -1 if there is no parent.
int parent_offset_;
BytecodeLoopAssignments assignments_;
+ ZoneVector<ResumeJumpTarget> resume_jump_targets_;
};
class V8_EXPORT_PRIVATE BytecodeAnalysis BASE_EMBEDDED {
@@ -78,10 +113,16 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis BASE_EMBEDDED {
// Get the loop info of the loop header at {header_offset}.
const LoopInfo& GetLoopInfoFor(int header_offset) const;
+ // Get the top-level resume jump targets.
+ const ZoneVector<ResumeJumpTarget>& resume_jump_targets() const {
+ return resume_jump_targets_;
+ }
+
// True if the current analysis has an OSR entry point.
bool HasOsrEntryPoint() const { return osr_entry_point_ != -1; }
int osr_entry_point() const { return osr_entry_point_; }
+
// Gets the in-liveness for the bytecode at {offset}.
const BytecodeLivenessState* GetInLivenessFor(int offset) const;
@@ -99,6 +140,12 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis BASE_EMBEDDED {
void PushLoop(int loop_header, int loop_end);
#if DEBUG
+ bool ResumeJumpTargetsAreValid();
+ bool ResumeJumpTargetLeavesResolveSuspendIds(
+ int parent_offset,
+ const ZoneVector<ResumeJumpTarget>& resume_jump_targets,
+ std::map<int, int>* unresolved_suspend_ids);
+
bool LivenessIsValid();
#endif
@@ -112,6 +159,7 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis BASE_EMBEDDED {
ZoneStack<LoopStackEntry> loop_stack_;
ZoneVector<int> loop_end_index_queue_;
+ ZoneVector<ResumeJumpTarget> resume_jump_targets_;
ZoneMap<int, int> end_to_header_;
ZoneMap<int, LoopInfo> header_to_info_;
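
ResumeJumpTarget encodes where a generator-state switch should jump: a leaf target points directly at a ResumeGenerator bytecode, while a non-leaf target points at the header of the next inner loop so that every loop stays reducible. The sketch below mirrors how a leaf target is rewritten, level by level, as it is propagated out through enclosing loop headers; the types and function are hypothetical stand-ins for ResumeJumpTarget::Leaf and ResumeJumpTarget::AtLoopHeader, not the V8 classes themselves.

// Sketch of how a leaf resume target is rewritten as it is propagated out of
// nested loops: the switch at each enclosing level jumps to the header of the
// loop one level further in, and only the innermost switch jumps to the real
// resume offset.
#include <iostream>
#include <vector>

struct Target {
  int suspend_id;
  int target_offset;        // where this level's switch jumps
  int final_target_offset;  // the actual ResumeGenerator bytecode
  bool is_leaf() const { return target_offset == final_target_offset; }
};

// {loop_headers} lists the enclosing loop header offsets, innermost first.
// Element 0 of the result is the target used by the innermost loop's switch;
// each later element is what the next enclosing level, and finally the
// function-level generator switch, jumps to.
std::vector<Target> PropagateResume(int suspend_id, int resume_offset,
                                    const std::vector<int>& loop_headers) {
  std::vector<Target> per_level;
  Target current{suspend_id, resume_offset, resume_offset};  // the leaf
  per_level.push_back(current);
  for (int header_offset : loop_headers) {
    // The level outside this loop jumps to the loop header instead of into
    // its body, which keeps the loop reducible.
    current = Target{current.suspend_id, header_offset,
                     current.final_target_offset};
    per_level.push_back(current);
  }
  return per_level;
}

int main() {
  // Suspend #2 at offset 80, inside a loop with header 60, nested in a loop
  // with header 40.
  for (const Target& t : PropagateResume(2, 80, {60, 40})) {
    std::cout << "suspend " << t.suspend_id << " -> offset " << t.target_offset
              << (t.is_leaf() ? " (leaf)\n" : " (loop header)\n");
  }
  return 0;
}
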
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 54a924fce4..3b2a3eb252 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -9,6 +9,7 @@
#include "src/compiler/access-builder.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/linkage.h"
+#include "src/compiler/node-matchers.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/interpreter/bytecodes.h"
@@ -40,6 +41,7 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
Node* LookupAccumulator() const;
Node* LookupRegister(interpreter::Register the_register) const;
+ Node* LookupGeneratorState() const;
void BindAccumulator(Node* node,
FrameStateAttachmentMode mode = kDontAttachFrameState);
@@ -48,6 +50,7 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
void BindRegistersToProjections(
interpreter::Register first_reg, Node* node,
FrameStateAttachmentMode mode = kDontAttachFrameState);
+ void BindGeneratorState(Node* node);
void RecordAfterState(Node* node,
FrameStateAttachmentMode mode = kDontAttachFrameState);
@@ -108,6 +111,7 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
Node* effect_dependency_;
NodeVector values_;
Node* parameters_state_values_;
+ Node* generator_state_;
int register_base_;
int accumulator_base_;
};
@@ -138,7 +142,8 @@ BytecodeGraphBuilder::Environment::Environment(
control_dependency_(control_dependency),
effect_dependency_(control_dependency),
values_(builder->local_zone()),
- parameters_state_values_(nullptr) {
+ parameters_state_values_(nullptr),
+ generator_state_(nullptr) {
// The layout of values_ is:
//
// [receiver] [parameters] [registers] [accumulator]
@@ -191,6 +196,7 @@ BytecodeGraphBuilder::Environment::Environment(
effect_dependency_(other->effect_dependency_),
values_(other->zone()),
parameters_state_values_(other->parameters_state_values_),
+ generator_state_(other->generator_state_),
register_base_(other->register_base_),
accumulator_base_(other->accumulator_base_) {
values_ = other->values_;
@@ -210,6 +216,10 @@ Node* BytecodeGraphBuilder::Environment::LookupAccumulator() const {
return values()->at(accumulator_base_);
}
+Node* BytecodeGraphBuilder::Environment::LookupGeneratorState() const {
+ DCHECK_NOT_NULL(generator_state_);
+ return generator_state_;
+}
Node* BytecodeGraphBuilder::Environment::LookupRegister(
interpreter::Register the_register) const {
@@ -231,6 +241,10 @@ void BytecodeGraphBuilder::Environment::BindAccumulator(
values()->at(accumulator_base_) = node;
}
+void BytecodeGraphBuilder::Environment::BindGeneratorState(Node* node) {
+ generator_state_ = node;
+}
+
void BytecodeGraphBuilder::Environment::BindRegister(
interpreter::Register the_register, Node* node,
FrameStateAttachmentMode mode) {
@@ -291,9 +305,18 @@ void BytecodeGraphBuilder::Environment::Merge(
for (int i = 0; i < register_count(); i++) {
int index = register_base() + i;
if (liveness == nullptr || liveness->RegisterIsLive(i)) {
- DCHECK_NE(values_[index], builder()->jsgraph()->OptimizedOutConstant());
- DCHECK_NE(other->values_[index],
- builder()->jsgraph()->OptimizedOutConstant());
+#if DEBUG
+ // We only do these DCHECKs when we are not in the resume path of a
+ // generator -- this is, when either there is no generator state at all,
+ // or the generator state is not the constant "executing" value.
+ if (generator_state_ == nullptr ||
+ NumberMatcher(generator_state_)
+ .Is(JSGeneratorObject::kGeneratorExecuting)) {
+ DCHECK_NE(values_[index], builder()->jsgraph()->OptimizedOutConstant());
+ DCHECK_NE(other->values_[index],
+ builder()->jsgraph()->OptimizedOutConstant());
+ }
+#endif
values_[index] =
builder()->MergeValue(values_[index], other->values_[index], control);
@@ -315,6 +338,12 @@ void BytecodeGraphBuilder::Environment::Merge(
} else {
values_[accumulator_base()] = builder()->jsgraph()->OptimizedOutConstant();
}
+
+ if (generator_state_ != nullptr) {
+ DCHECK_NOT_NULL(other->generator_state_);
+ generator_state_ = builder()->MergeValue(generator_state_,
+ other->generator_state_, control);
+ }
}
void BytecodeGraphBuilder::Environment::PrepareForLoop(
@@ -345,6 +374,10 @@ void BytecodeGraphBuilder::Environment::PrepareForLoop(
// The accumulator should not be live on entry.
DCHECK_IMPLIES(liveness != nullptr, !liveness->AccumulatorIsLive());
+ if (generator_state_ != nullptr) {
+ generator_state_ = builder()->NewPhi(1, generator_state_, control);
+ }
+
// Connect to the loop end.
Node* terminate = builder()->graph()->NewNode(
builder()->common()->Terminate(), effect, control);
@@ -423,6 +456,11 @@ void BytecodeGraphBuilder::Environment::PrepareForLoopExit(
values_[accumulator_base()], loop_exit);
values_[accumulator_base()] = rename;
}
+
+ if (generator_state_ != nullptr) {
+ generator_state_ = graph()->NewNode(common()->LoopExitValue(),
+ generator_state_, loop_exit);
+ }
}
void BytecodeGraphBuilder::Environment::UpdateStateValues(Node** state_values,
@@ -483,8 +521,6 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
jsgraph_(jsgraph),
invocation_frequency_(invocation_frequency),
bytecode_array_(handle(shared_info->bytecode_array())),
- exception_handler_table_(
- handle(HandlerTable::cast(bytecode_array()->handler_table()))),
feedback_vector_(feedback_vector),
type_hint_lowering_(jsgraph, feedback_vector, flags),
frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
@@ -498,6 +534,7 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
currently_peeled_loop_offset_(-1),
stack_check_(stack_check),
merge_environments_(local_zone),
+ generator_merge_environments_(local_zone),
exception_handlers_(local_zone),
current_exception_handler_(0),
input_buffer_size_(0),
@@ -529,7 +566,7 @@ Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
}
VectorSlotPair BytecodeGraphBuilder::CreateVectorSlotPair(int slot_id) {
- return VectorSlotPair(feedback_vector(), feedback_vector()->ToSlot(slot_id));
+ return VectorSlotPair(feedback_vector(), FeedbackVector::ToSlot(slot_id));
}
void BytecodeGraphBuilder::CreateGraph() {
@@ -847,6 +884,11 @@ void BytecodeGraphBuilder::VisitBytecodes() {
bytecode_analysis.PrintLivenessTo(of);
}
+ if (!bytecode_analysis.resume_jump_targets().empty()) {
+ environment()->BindGeneratorState(
+ jsgraph()->SmiConstant(JSGeneratorObject::kGeneratorExecuting));
+ }
+
if (bytecode_analysis.HasOsrEntryPoint()) {
// We peel the OSR loop and any outer loop containing it except that we
// leave the nodes corresponding to the whole outermost loop (including
@@ -1393,14 +1435,17 @@ void BytecodeGraphBuilder::VisitPopContext() {
void BytecodeGraphBuilder::VisitCreateClosure() {
Handle<SharedFunctionInfo> shared_info = Handle<SharedFunctionInfo>::cast(
bytecode_iterator().GetConstantForIndexOperand(0));
- int const slot_id = bytecode_iterator().GetIndexOperand(1);
- VectorSlotPair pair = CreateVectorSlotPair(slot_id);
+ FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1);
+ FeedbackNexus nexus(feedback_vector(), slot);
PretenureFlag tenured =
interpreter::CreateClosureFlags::PretenuredBit::decode(
bytecode_iterator().GetFlagOperand(2))
? TENURED
: NOT_TENURED;
- const Operator* op = javascript()->CreateClosure(shared_info, pair, tenured);
+ const Operator* op = javascript()->CreateClosure(
+ shared_info, nexus.GetFeedbackCell(),
+ handle(jsgraph()->isolate()->builtins()->builtin(Builtins::kCompileLazy)),
+ tenured);
Node* closure = NewNode(op);
environment()->BindAccumulator(closure);
}
@@ -1540,12 +1585,21 @@ void BytecodeGraphBuilder::VisitGetTemplateObject() {
Handle<TemplateObjectDescription> description =
Handle<TemplateObjectDescription>::cast(
bytecode_iterator().GetConstantForIndexOperand(0));
- // It's not observable when the template object is created, so we
- // can just create it eagerly during graph building and bake in
- // the JSArray constant here.
- Node* template_object =
- jsgraph()->HeapConstant(TemplateObjectDescription::GetTemplateObject(
- description, native_context()));
+ FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1);
+ FeedbackNexus nexus(feedback_vector(), slot);
+
+ Handle<JSArray> cached_value;
+ if (nexus.GetFeedback() == Smi::kZero) {
+ // It's not observable when the template object is created, so we
+ // can just create it eagerly during graph building and bake in
+ // the JSArray constant here.
+ cached_value = TemplateObjectDescription::CreateTemplateObject(description);
+ nexus.vector()->Set(slot, *cached_value);
+ } else {
+ cached_value = handle(JSArray::cast(nexus.GetFeedback()));
+ }
+
+ Node* template_object = jsgraph()->HeapConstant(cached_value);
environment()->BindAccumulator(template_object);
}
@@ -2015,8 +2069,8 @@ void BytecodeGraphBuilder::BuildUnaryOp(const Operator* op) {
PrepareEagerCheckpoint();
Node* operand = environment()->LookupAccumulator();
- FeedbackSlot slot = feedback_vector()->ToSlot(
- bytecode_iterator().GetIndexOperand(kUnaryOperationHintIndex));
+ FeedbackSlot slot =
+ bytecode_iterator().GetSlotOperand(kUnaryOperationHintIndex);
JSTypeHintLowering::LoweringResult lowering =
TryBuildSimplifiedUnaryOp(op, operand, slot);
if (lowering.IsExit()) return;
@@ -2038,8 +2092,8 @@ void BytecodeGraphBuilder::BuildBinaryOp(const Operator* op) {
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Node* right = environment()->LookupAccumulator();
- FeedbackSlot slot = feedback_vector()->ToSlot(
- bytecode_iterator().GetIndexOperand(kBinaryOperationHintIndex));
+ FeedbackSlot slot =
+ bytecode_iterator().GetSlotOperand(kBinaryOperationHintIndex);
JSTypeHintLowering::LoweringResult lowering =
TryBuildSimplifiedBinaryOp(op, left, right, slot);
if (lowering.IsExit()) return;
@@ -2059,28 +2113,23 @@ void BytecodeGraphBuilder::BuildBinaryOp(const Operator* op) {
// feedback.
BinaryOperationHint BytecodeGraphBuilder::GetBinaryOperationHint(
int operand_index) {
- FeedbackSlot slot = feedback_vector()->ToSlot(
- bytecode_iterator().GetIndexOperand(operand_index));
- DCHECK_EQ(FeedbackSlotKind::kBinaryOp, feedback_vector()->GetKind(slot));
- BinaryOpICNexus nexus(feedback_vector(), slot);
+ FeedbackSlot slot = bytecode_iterator().GetSlotOperand(operand_index);
+ FeedbackNexus nexus(feedback_vector(), slot);
return nexus.GetBinaryOperationFeedback();
}
// Helper function to create compare operation hint from the recorded type
// feedback.
CompareOperationHint BytecodeGraphBuilder::GetCompareOperationHint() {
- int slot_index = bytecode_iterator().GetIndexOperand(1);
- FeedbackSlot slot = feedback_vector()->ToSlot(slot_index);
- DCHECK_EQ(FeedbackSlotKind::kCompareOp, feedback_vector()->GetKind(slot));
- CompareICNexus nexus(feedback_vector(), slot);
+ FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1);
+ FeedbackNexus nexus(feedback_vector(), slot);
return nexus.GetCompareOperationFeedback();
}
// Helper function to create for-in mode from the recorded type feedback.
ForInMode BytecodeGraphBuilder::GetForInMode(int operand_index) {
- FeedbackSlot slot = feedback_vector()->ToSlot(
- bytecode_iterator().GetIndexOperand(operand_index));
- ForInICNexus nexus(feedback_vector(), slot);
+ FeedbackSlot slot = bytecode_iterator().GetSlotOperand(operand_index);
+ FeedbackNexus nexus(feedback_vector(), slot);
switch (nexus.GetForInFeedback()) {
case ForInHint::kNone:
case ForInHint::kEnumCacheKeysAndIndices:
@@ -2095,13 +2144,13 @@ ForInMode BytecodeGraphBuilder::GetForInMode(int operand_index) {
CallFrequency BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const {
if (invocation_frequency_.IsUnknown()) return CallFrequency();
- CallICNexus nexus(feedback_vector(), feedback_vector()->ToSlot(slot_id));
+ FeedbackNexus nexus(feedback_vector(), FeedbackVector::ToSlot(slot_id));
return CallFrequency(nexus.ComputeCallFrequency() *
invocation_frequency_.value());
}
SpeculationMode BytecodeGraphBuilder::GetSpeculationMode(int slot_id) const {
- CallICNexus nexus(feedback_vector(), feedback_vector()->ToSlot(slot_id));
+ FeedbackNexus nexus(feedback_vector(), FeedbackVector::ToSlot(slot_id));
return nexus.GetSpeculationMode();
}
@@ -2173,8 +2222,8 @@ void BytecodeGraphBuilder::BuildBinaryOpWithImmediate(const Operator* op) {
Node* left = environment()->LookupAccumulator();
Node* right = jsgraph()->Constant(bytecode_iterator().GetImmediateOperand(0));
- FeedbackSlot slot = feedback_vector()->ToSlot(
- bytecode_iterator().GetIndexOperand(kBinaryOperationSmiHintIndex));
+ FeedbackSlot slot =
+ bytecode_iterator().GetSlotOperand(kBinaryOperationSmiHintIndex);
JSTypeHintLowering::LoweringResult lowering =
TryBuildSimplifiedBinaryOp(op, left, right, slot);
if (lowering.IsExit()) return;
@@ -2288,8 +2337,7 @@ void BytecodeGraphBuilder::BuildCompareOp(const Operator* op) {
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Node* right = environment()->LookupAccumulator();
- int slot_index = bytecode_iterator().GetIndexOperand(1);
- FeedbackSlot slot = feedback_vector()->ToSlot(slot_index);
+ FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1);
JSTypeHintLowering::LoweringResult lowering =
TryBuildSimplifiedBinaryOp(op, left, right, slot);
if (lowering.IsExit()) return;
@@ -2452,8 +2500,7 @@ void BytecodeGraphBuilder::VisitToNumber() {
PrepareEagerCheckpoint();
Node* object = environment()->LookupAccumulator();
- FeedbackSlot slot =
- feedback_vector()->ToSlot(bytecode_iterator().GetIndexOperand(0));
+ FeedbackSlot slot = bytecode_iterator().GetSlotOperand(0);
JSTypeHintLowering::LoweringResult lowering =
TryBuildSimplifiedToNumber(object, slot);
@@ -2474,8 +2521,7 @@ void BytecodeGraphBuilder::VisitToNumeric() {
// If we have some kind of Number feedback, we do the same lowering as for
// ToNumber.
- FeedbackSlot slot =
- feedback_vector()->ToSlot(bytecode_iterator().GetIndexOperand(0));
+ FeedbackSlot slot = bytecode_iterator().GetSlotOperand(0);
JSTypeHintLowering::LoweringResult lowering =
TryBuildSimplifiedToNumber(object, slot);
@@ -2591,15 +2637,19 @@ void BytecodeGraphBuilder::VisitSetPendingMessage() {
environment()->BindAccumulator(previous_message);
}
-void BytecodeGraphBuilder::VisitReturn() {
- BuildLoopExitsForFunctionExit(bytecode_analysis()->GetInLivenessFor(
- bytecode_iterator().current_offset()));
+void BytecodeGraphBuilder::BuildReturn(const BytecodeLivenessState* liveness) {
+ BuildLoopExitsForFunctionExit(liveness);
Node* pop_node = jsgraph()->ZeroConstant();
Node* control =
NewNode(common()->Return(), pop_node, environment()->LookupAccumulator());
MergeControlToLeaveFunction(control);
}
+void BytecodeGraphBuilder::VisitReturn() {
+ BuildReturn(bytecode_analysis()->GetInLivenessFor(
+ bytecode_iterator().current_offset()));
+}
+
void BytecodeGraphBuilder::VisitDebugger() {
PrepareEagerCheckpoint();
Node* call = NewNode(javascript()->Debugger());
@@ -2633,8 +2683,7 @@ void BytecodeGraphBuilder::VisitForInPrepare() {
PrepareEagerCheckpoint();
Node* enumerator = environment()->LookupAccumulator();
- FeedbackSlot slot =
- feedback_vector()->ToSlot(bytecode_iterator().GetIndexOperand(1));
+ FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1);
JSTypeHintLowering::LoweringResult lowering =
TryBuildSimplifiedForInPrepare(enumerator, slot);
if (lowering.IsExit()) return;
@@ -2675,8 +2724,7 @@ void BytecodeGraphBuilder::VisitForInNext() {
environment()->GetControlDependency());
environment()->UpdateEffectDependency(index);
- FeedbackSlot slot =
- feedback_vector()->ToSlot(bytecode_iterator().GetIndexOperand(3));
+ FeedbackSlot slot = bytecode_iterator().GetSlotOperand(3);
JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedForInNext(
receiver, cache_array, cache_type, index, slot);
if (lowering.IsExit()) return;
@@ -2714,54 +2762,135 @@ void BytecodeGraphBuilder::VisitSuspendGenerator() {
jsgraph()->Constant(bytecode_iterator().current_offset() +
(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ const BytecodeLivenessState* liveness = bytecode_analysis()->GetInLivenessFor(
+ bytecode_iterator().current_offset());
+
+ // Maybe overallocate the value list since we don't know how many registers
+ // are live.
+ // TODO(leszeks): We could get this count from liveness rather than the
+ // register list.
int value_input_count = 3 + register_count;
Node** value_inputs = local_zone()->NewArray<Node*>(value_input_count);
value_inputs[0] = generator;
value_inputs[1] = suspend_id;
value_inputs[2] = offset;
+
+ int count_written = 0;
for (int i = 0; i < register_count; ++i) {
- value_inputs[3 + i] =
- environment()->LookupRegister(interpreter::Register(i));
+ if (liveness == nullptr || liveness->RegisterIsLive(i)) {
+ while (count_written < i) {
+ value_inputs[3 + count_written++] = jsgraph()->OptimizedOutConstant();
+ }
+ value_inputs[3 + count_written++] =
+ environment()->LookupRegister(interpreter::Register(i));
+ DCHECK_EQ(count_written, i + 1);
+ }
}
- MakeNode(javascript()->GeneratorStore(register_count), value_input_count,
+ // Use the actual written count rather than the register count to create the
+ // node.
+ MakeNode(javascript()->GeneratorStore(count_written), 3 + count_written,
value_inputs, false);
+
+ // TODO(leszeks): This over-approximates the liveness at exit, only the
+ // accumulator should be live by this point.
+ BuildReturn(bytecode_analysis()->GetInLivenessFor(
+ bytecode_iterator().current_offset()));
}
-void BytecodeGraphBuilder::VisitRestoreGeneratorState() {
- Node* generator = environment()->LookupRegister(
- bytecode_iterator().GetRegisterOperand(0));
+void BytecodeGraphBuilder::BuildSwitchOnGeneratorState(
+ const ZoneVector<ResumeJumpTarget>& resume_jump_targets,
+ bool allow_fallthrough_on_executing) {
+ Node* generator_state = environment()->LookupGeneratorState();
+
+ int extra_cases = allow_fallthrough_on_executing ? 2 : 1;
+ NewSwitch(generator_state,
+ static_cast<int>(resume_jump_targets.size() + extra_cases));
+ for (const ResumeJumpTarget& target : resume_jump_targets) {
+ SubEnvironment sub_environment(this);
+ NewIfValue(target.suspend_id());
+ if (target.is_leaf()) {
+ // Mark that we are resuming executing.
+ environment()->BindGeneratorState(
+ jsgraph()->SmiConstant(JSGeneratorObject::kGeneratorExecuting));
+ }
+ // Jump to the target offset, whether it's a loop header or the resume.
+ MergeIntoSuccessorEnvironment(target.target_offset());
+ }
+
+ {
+ SubEnvironment sub_environment(this);
+ // We should never hit the default case (assuming generator state cannot be
+ // corrupted), so abort if we do.
+ // TODO(leszeks): Maybe only check this in debug mode, and otherwise use
+ // the default to represent one of the cases above/fallthrough below?
+ NewIfDefault();
+ NewNode(simplified()->RuntimeAbort(AbortReason::kInvalidJumpTableIndex));
+ Node* control = NewNode(common()->Throw());
+ MergeControlToLeaveFunction(control);
+ }
+
+ if (allow_fallthrough_on_executing) {
+ // If we are executing (rather than resuming), and we allow it, just fall
+ // through to the actual loop body.
+ NewIfValue(JSGeneratorObject::kGeneratorExecuting);
+ } else {
+ // Otherwise, this environment is dead.
+ set_environment(nullptr);
+ }
+}
+
+void BytecodeGraphBuilder::VisitSwitchOnGeneratorState() {
+ Node* generator =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+
+ Node* generator_is_undefined =
+ NewNode(simplified()->ReferenceEqual(), generator,
+ jsgraph()->UndefinedConstant());
- Node* state =
- NewNode(javascript()->GeneratorRestoreContinuation(), generator);
+ NewBranch(generator_is_undefined);
+ {
+ SubEnvironment resume_env(this);
+ NewIfFalse();
+
+ Node* generator_state =
+ NewNode(javascript()->GeneratorRestoreContinuation(), generator);
+ environment()->BindGeneratorState(generator_state);
+
+ Node* generator_context =
+ NewNode(javascript()->GeneratorRestoreContext(), generator);
+ environment()->SetContext(generator_context);
+
+ BuildSwitchOnGeneratorState(bytecode_analysis()->resume_jump_targets(),
+ false);
+ }
- environment()->BindAccumulator(state, Environment::kAttachFrameState);
+ // Fallthrough for the first-call case.
+ NewIfTrue();
}
void BytecodeGraphBuilder::VisitResumeGenerator() {
Node* generator =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- interpreter::Register generator_state_reg =
- bytecode_iterator().GetRegisterOperand(1);
- interpreter::Register first_reg = bytecode_iterator().GetRegisterOperand(2);
+ interpreter::Register first_reg = bytecode_iterator().GetRegisterOperand(1);
   // We assume we are restoring registers starting from index 0.
CHECK_EQ(0, first_reg.index());
- int register_count =
- static_cast<int>(bytecode_iterator().GetRegisterCountOperand(3));
+
+ const BytecodeLivenessState* liveness =
+ bytecode_analysis()->GetOutLivenessFor(
+ bytecode_iterator().current_offset());
// Bijection between registers and array indices must match that used in
// InterpreterAssembler::ExportRegisterFile.
- for (int i = 0; i < register_count; ++i) {
- Node* value = NewNode(javascript()->GeneratorRestoreRegister(i), generator);
- environment()->BindRegister(interpreter::Register(i), value);
+ for (int i = 0; i < environment()->register_count(); ++i) {
+ if (liveness == nullptr || liveness->RegisterIsLive(i)) {
+ Node* value =
+ NewNode(javascript()->GeneratorRestoreRegister(i), generator);
+ environment()->BindRegister(interpreter::Register(i), value);
+ }
}
- // We're no longer resuming, so update the state register.
- environment()->BindRegister(
- generator_state_reg,
- jsgraph()->SmiConstant(JSGeneratorObject::kGeneratorExecuting));
-
// Update the accumulator with the generator's input_or_debug_pos.
Node* input_or_debug_pos =
NewNode(javascript()->GeneratorRestoreInputOrDebugPos(), generator);
@@ -2803,12 +2932,29 @@ void BytecodeGraphBuilder::BuildLoopHeaderEnvironment(int current_offset) {
const BytecodeLivenessState* liveness =
bytecode_analysis()->GetInLivenessFor(current_offset);
+ const auto& resume_jump_targets = loop_info.resume_jump_targets();
+ bool generate_suspend_switch = !resume_jump_targets.empty();
+
// Add loop header.
environment()->PrepareForLoop(loop_info.assignments(), liveness);
// Store a copy of the environment so we can connect merged back edge inputs
// to the loop header.
merge_environments_[current_offset] = environment()->Copy();
+
+ // If this loop contains resumes, create a new switch just after the loop
+ // for those resumes.
+ if (generate_suspend_switch) {
+ BuildSwitchOnGeneratorState(loop_info.resume_jump_targets(), true);
+
+ // TODO(leszeks): At this point we know we are executing rather than
+ // resuming, so we should be able to prune off the phis in the environment
+ // related to the resume path.
+
+ // Set the generator state to a known constant.
+ environment()->BindGeneratorState(
+ jsgraph()->SmiConstant(JSGeneratorObject::kGeneratorExecuting));
+ }
}
}
@@ -2874,7 +3020,7 @@ void BytecodeGraphBuilder::BuildJump() {
}
void BytecodeGraphBuilder::BuildJumpIf(Node* condition) {
- NewBranch(condition);
+ NewBranch(condition, BranchHint::kNone, IsSafetyCheck::kNoSafetyCheck);
{
SubEnvironment sub_environment(this);
NewIfTrue();
@@ -2884,7 +3030,7 @@ void BytecodeGraphBuilder::BuildJumpIf(Node* condition) {
}
void BytecodeGraphBuilder::BuildJumpIfNot(Node* condition) {
- NewBranch(condition);
+ NewBranch(condition, BranchHint::kNone, IsSafetyCheck::kNoSafetyCheck);
{
SubEnvironment sub_environment(this);
NewIfFalse();
@@ -2908,7 +3054,8 @@ void BytecodeGraphBuilder::BuildJumpIfNotEqual(Node* comperand) {
}
void BytecodeGraphBuilder::BuildJumpIfFalse() {
- NewBranch(environment()->LookupAccumulator());
+ NewBranch(environment()->LookupAccumulator(), BranchHint::kNone,
+ IsSafetyCheck::kNoSafetyCheck);
{
SubEnvironment sub_environment(this);
NewIfFalse();
@@ -2920,7 +3067,8 @@ void BytecodeGraphBuilder::BuildJumpIfFalse() {
}
void BytecodeGraphBuilder::BuildJumpIfTrue() {
- NewBranch(environment()->LookupAccumulator());
+ NewBranch(environment()->LookupAccumulator(), BranchHint::kNone,
+ IsSafetyCheck::kNoSafetyCheck);
{
SubEnvironment sub_environment(this);
NewIfTrue();
@@ -3123,8 +3271,7 @@ Node** BytecodeGraphBuilder::EnsureInputBufferSize(int size) {
}
void BytecodeGraphBuilder::ExitThenEnterExceptionHandlers(int current_offset) {
- Handle<HandlerTable> table = exception_handler_table();
- int num_entries = table->NumberOfRangeEntries();
+ HandlerTable table(*bytecode_array());
// Potentially exit exception handlers.
while (!exception_handlers_.empty()) {
@@ -3134,12 +3281,13 @@ void BytecodeGraphBuilder::ExitThenEnterExceptionHandlers(int current_offset) {
}
// Potentially enter exception handlers.
+ int num_entries = table.NumberOfRangeEntries();
while (current_exception_handler_ < num_entries) {
- int next_start = table->GetRangeStart(current_exception_handler_);
+ int next_start = table.GetRangeStart(current_exception_handler_);
if (current_offset < next_start) break; // Not yet covered by range.
- int next_end = table->GetRangeEnd(current_exception_handler_);
- int next_handler = table->GetRangeHandler(current_exception_handler_);
- int context_register = table->GetRangeData(current_exception_handler_);
+ int next_end = table.GetRangeEnd(current_exception_handler_);
+ int next_handler = table.GetRangeHandler(current_exception_handler_);
+ int context_register = table.GetRangeData(current_exception_handler_);
exception_handlers_.push(
{next_start, next_end, next_handler, context_register});
current_exception_handler_++;
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index 91b857298c..75d464f79e 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -100,8 +100,9 @@ class BytecodeGraphBuilder {
Node* NewIfDefault() { return NewNode(common()->IfDefault()); }
Node* NewMerge() { return NewNode(common()->Merge(1), true); }
Node* NewLoop() { return NewNode(common()->Loop(1), true); }
- Node* NewBranch(Node* condition, BranchHint hint = BranchHint::kNone) {
- return NewNode(common()->Branch(hint), condition);
+ Node* NewBranch(Node* condition, BranchHint hint = BranchHint::kNone,
+ IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck) {
+ return NewNode(common()->Branch(hint, is_safety_check), condition);
}
Node* NewSwitch(Node* condition, int control_output_count) {
return NewNode(common()->Switch(control_output_count), condition);
@@ -252,6 +253,9 @@ class BytecodeGraphBuilder {
void BuildJumpIfJSReceiver();
void BuildSwitchOnSmi(Node* condition);
+ void BuildSwitchOnGeneratorState(
+ const ZoneVector<ResumeJumpTarget>& resume_jump_targets,
+ bool allow_fallthrough_on_executing);
// Simulates control flow by forward-propagating environments.
void MergeIntoSuccessorEnvironment(int target_offset);
@@ -268,6 +272,9 @@ class BytecodeGraphBuilder {
void BuildLoopExitsUntilLoop(int loop_offset,
const BytecodeLivenessState* liveness);
+ // Helper for building a return (from an actual return or a suspend).
+ void BuildReturn(const BytecodeLivenessState* liveness);
+
// Simulates entry and exit of exception handlers.
void ExitThenEnterExceptionHandlers(int current_offset);
@@ -303,9 +310,6 @@ class BytecodeGraphBuilder {
const Handle<BytecodeArray>& bytecode_array() const {
return bytecode_array_;
}
- const Handle<HandlerTable>& exception_handler_table() const {
- return exception_handler_table_;
- }
const Handle<FeedbackVector>& feedback_vector() const {
return feedback_vector_;
}
@@ -366,7 +370,6 @@ class BytecodeGraphBuilder {
JSGraph* jsgraph_;
CallFrequency const invocation_frequency_;
Handle<BytecodeArray> bytecode_array_;
- Handle<HandlerTable> exception_handler_table_;
Handle<FeedbackVector> feedback_vector_;
const JSTypeHintLowering type_hint_lowering_;
const FrameStateFunctionInfo* frame_state_function_info_;
@@ -379,9 +382,18 @@ class BytecodeGraphBuilder {
// Merge environments are snapshots of the environment at points where the
// control flow merges. This models a forward data flow propagation of all
- // values from all predecessors of the merge in question.
+ // values from all predecessors of the merge in question. They are indexed by
+ // the bytecode offset.
ZoneMap<int, Environment*> merge_environments_;
+ // Generator merge environments are snapshots of the current resume
+ // environment, tracing back through loop headers to the resume switch of a
+ // generator. They allow us to model a single resume jump as several switch
+ // statements across loop headers, keeping those loop headers reducible,
+ // without having to merge the "executing" environments of the generator into
+ // the "resuming" ones. They are indexed by the suspend id of the resume.
+ ZoneMap<int, Environment*> generator_merge_environments_;
+
// Exception handlers currently entered by the iteration.
ZoneStack<ExceptionHandler> exception_handlers_;
int current_exception_handler_;
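
The generator_merge_environments_ comment describes the dispatch scheme that BuildSwitchOnGeneratorState and the per-loop suspend switches implement: a resume never jumps straight to a suspend point inside a loop; it jumps to the loop header, and a secondary switch just after the header forwards it, so loop headers keep a single entry edge and stay reducible. A rough standalone model of that chain, with integers for suspend ids and nested switch statements standing in for the generated Switch nodes (purely illustrative, not V8 code):

    #include <cstdio>

    constexpr int kExecuting = -1;  // models JSGeneratorObject::kGeneratorExecuting

    // Toy layout: suspend id 0 sits before the loop, ids 1 and 2 inside the loop body.
    void Run(int state) {
      // Outer switch at function entry: resumes that target the loop only reach the
      // loop header here, never the suspend point itself.
      switch (state) {
        case 0: std::puts("resumed at suspend 0"); state = kExecuting; break;
        default: break;  // ids 1 and 2 are handled after the loop header below
      }
      for (int i = 0; i < 3; ++i) {  // loop header
        // Secondary switch just after the loop header forwards a pending resume,
        // then marks the generator as executing again.
        switch (state) {
          case 1: std::puts("resumed at suspend 1"); state = kExecuting; break;
          case 2: std::puts("resumed at suspend 2"); state = kExecuting; break;
          default: break;  // executing normally
        }
        std::printf("loop body, iteration %d\n", i);
      }
    }

    int main() { Run(2); }

generator_merge_environments_ is the bookkeeping for this: it remembers, per suspend id, the environment that still has to be threaded through the secondary switch of each enclosing loop header.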
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index 330b19fac3..02b6f5fb3d 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -165,8 +165,7 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(
#ifdef UNSUPPORTED_C_LINKAGE
// This method should not be called on unknown architectures.
- V8_Fatal(__FILE__, __LINE__,
- "requested C call descriptor on unsupported architecture");
+ FATAL("requested C call descriptor on unsupported architecture");
return nullptr;
#endif
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 071f8952db..4ad286c68c 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -6,6 +6,7 @@
#include <ostream>
+#include "src/builtins/constants-table-builder.h"
#include "src/code-factory.h"
#include "src/compiler/graph.h"
#include "src/compiler/instruction-selector.h"
@@ -58,6 +59,8 @@ CodeAssemblerState::CodeAssemblerState(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
Code::Kind kind, const char* name, size_t result_size, uint32_t stub_key,
int32_t builtin_index)
+ // TODO(rmcilroy): Should we use Linkage::GetBytecodeDispatchDescriptor for
+ // bytecode handlers?
: CodeAssemblerState(
isolate, zone,
Linkage::GetStubCallDescriptor(
@@ -232,6 +235,33 @@ bool CodeAssembler::IsIntPtrAbsWithOverflowSupported() const {
: IsInt32AbsWithOverflowSupported();
}
+#ifdef V8_EMBEDDED_BUILTINS
+TNode<HeapObject> CodeAssembler::LookupConstant(Handle<HeapObject> object) {
+ DCHECK(isolate()->serializer_enabled());
+
+ // Ensure the given object is in the builtins constants table and fetch its
+ // index.
+ BuiltinsConstantsTableBuilder* builder =
+ isolate()->builtins_constants_table_builder();
+ uint32_t index = builder->AddObject(object);
+
+ // The builtins constants table is loaded through the root register on all
+ // supported platforms. This is checked by the
+ // VerifyBuiltinsIsolateIndependence cctest, which disallows embedded objects
+ // in isolate-independent builtins.
+ DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
+ Heap::kBuiltinsConstantsTableRootIndex));
+ TNode<FixedArray> builtins_constants_table = UncheckedCast<FixedArray>(
+ LoadRoot(Heap::kBuiltinsConstantsTableRootIndex));
+
+ // Generate the lookup.
+ const int32_t header_size = FixedArray::kHeaderSize - kHeapObjectTag;
+ TNode<IntPtrT> offset = IntPtrConstant(header_size + kPointerSize * index);
+ return UncheckedCast<HeapObject>(
+ Load(MachineType::AnyTagged(), builtins_constants_table, offset));
+}
+#endif // V8_EMBEDDED_BUILTINS
+
TNode<Int32T> CodeAssembler::Int32Constant(int32_t value) {
return UncheckedCast<Int32T>(raw_assembler()->Int32Constant(value));
}
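
The offset computed by LookupConstant is simply the untagged FixedArray header plus one pointer-sized slot per index. A worked example of that arithmetic under assumed x64 values (two-word FixedArray header, kHeapObjectTag == 1, kPointerSize == 8; other configurations differ):

    #include <cstdint>
    #include <cstdio>
    #include <initializer_list>

    int main() {
      // Assumed x64 values; the real constants come from V8's globals.h.
      const int32_t kPointerSize = 8;
      const int32_t kHeapObjectTag = 1;                        // tagged pointers are off by one
      const int32_t kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length fields

      const int32_t header = kFixedArrayHeaderSize - kHeapObjectTag;
      for (uint32_t index : {0u, 1u, 3u}) {
        int32_t offset = header + kPointerSize * static_cast<int32_t>(index);
        // index 0 -> 15, index 1 -> 23, index 3 -> 39 under these assumptions.
        std::printf("constants-table slot %u loads at tagged offset %d\n", index, offset);
      }
    }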
@@ -264,12 +294,23 @@ TNode<Smi> CodeAssembler::SmiConstant(int value) {
TNode<HeapObject> CodeAssembler::UntypedHeapConstant(
Handle<HeapObject> object) {
+#ifdef V8_EMBEDDED_BUILTINS
+ // Root constants are simply loaded from the root list, while non-root
+ // constants must be looked up from the builtins constants table.
+ if (ShouldLoadConstantsFromRootList()) {
+ Heap::RootListIndex root_index;
+ if (!isolate()->heap()->IsRootHandle(object, &root_index)) {
+ return LookupConstant(object);
+ }
+ }
+#endif // V8_EMBEDDED_BUILTINS
return UncheckedCast<HeapObject>(raw_assembler()->HeapConstant(object));
}
TNode<String> CodeAssembler::StringConstant(const char* str) {
- return UncheckedCast<String>(
- HeapConstant(factory()->NewStringFromAsciiChecked(str, TENURED)));
+ Handle<String> internalized_string =
+ factory()->InternalizeOneByteString(OneByteVector(str));
+ return UncheckedCast<String>(HeapConstant(internalized_string));
}
TNode<Oddball> CodeAssembler::BooleanConstant(bool value) {
@@ -338,10 +379,10 @@ Node* CodeAssembler::Parameter(int value) {
}
TNode<Context> CodeAssembler::GetJSContextParameter() {
- CallDescriptor* desc = raw_assembler()->call_descriptor();
- DCHECK(desc->IsJSFunctionCall());
+ auto call_descriptor = raw_assembler()->call_descriptor();
+ DCHECK(call_descriptor->IsJSFunctionCall());
return CAST(Parameter(Linkage::GetJSCallContextParamIndex(
- static_cast<int>(desc->JSParameterCount()))));
+ static_cast<int>(call_descriptor->JSParameterCount()))));
}
void CodeAssembler::Return(SloppyTNode<Object> value) {
@@ -422,6 +463,10 @@ Node* CodeAssembler::LoadStackPointer() {
return raw_assembler()->LoadStackPointer();
}
+Node* CodeAssembler::SpeculationPoison() {
+ return raw_assembler()->SpeculationPoison();
+}
+
#define DEFINE_CODE_ASSEMBLER_BINARY_OP(name, ResType, Arg1Type, Arg2Type) \
TNode<ResType> CodeAssembler::name(SloppyTNode<Arg1Type> a, \
SloppyTNode<Arg2Type> b) { \
@@ -950,13 +995,13 @@ Node* CodeAssembler::Projection(int index, Node* value) {
void CodeAssembler::GotoIfException(Node* node, Label* if_exception,
Variable* exception_var) {
- DCHECK(!node->op()->HasProperty(Operator::kNoThrow));
-
if (if_exception == nullptr) {
// If no handler is supplied, don't add continuations
return;
}
+ DCHECK(!node->op()->HasProperty(Operator::kNoThrow));
+
Label success(this), exception(this, Label::kDeferred);
success.MergeVariables();
exception.MergeVariables();
@@ -979,10 +1024,10 @@ TNode<Object> CodeAssembler::CallRuntimeImpl(Runtime::FunctionId function,
SloppyTNode<Object> context,
TArgs... args) {
int argc = static_cast<int>(sizeof...(args));
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
zone(), function, argc, Operator::kNoProperties,
CallDescriptor::kNoFlags);
- int return_count = static_cast<int>(desc->ReturnCount());
+ int return_count = static_cast<int>(call_descriptor->ReturnCount());
Node* centry =
HeapConstant(CodeFactory::RuntimeCEntry(isolate(), return_count));
@@ -992,7 +1037,8 @@ TNode<Object> CodeAssembler::CallRuntimeImpl(Runtime::FunctionId function,
Node* nodes[] = {centry, args..., ref, arity, context};
CallPrologue();
- Node* return_value = raw_assembler()->CallN(desc, arraysize(nodes), nodes);
+ Node* return_value =
+ raw_assembler()->CallN(call_descriptor, arraysize(nodes), nodes);
CallEpilogue();
return UncheckedCast<Object>(return_value);
}
@@ -1009,10 +1055,10 @@ TNode<Object> CodeAssembler::TailCallRuntimeImpl(Runtime::FunctionId function,
SloppyTNode<Object> context,
TArgs... args) {
int argc = static_cast<int>(sizeof...(args));
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
zone(), function, argc, Operator::kNoProperties,
CallDescriptor::kNoFlags);
- int return_count = static_cast<int>(desc->ReturnCount());
+ int return_count = static_cast<int>(call_descriptor->ReturnCount());
Node* centry =
HeapConstant(CodeFactory::RuntimeCEntry(isolate(), return_count));
@@ -1022,7 +1068,7 @@ TNode<Object> CodeAssembler::TailCallRuntimeImpl(Runtime::FunctionId function,
Node* nodes[] = {centry, args..., ref, arity, context};
return UncheckedCast<Object>(
- raw_assembler()->TailCallN(desc, arraysize(nodes), nodes));
+ raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes));
}
// Instantiate TailCallRuntime() for argument counts used by CSA-generated code
@@ -1061,14 +1107,15 @@ Node* CodeAssembler::CallStubN(const CallInterfaceDescriptor& descriptor,
// Extra arguments not mentioned in the descriptor are passed on the stack.
int stack_parameter_count = argc - descriptor.GetRegisterParameterCount();
DCHECK_LE(descriptor.GetStackParameterCount(), stack_parameter_count);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), descriptor, stack_parameter_count,
CallDescriptor::kNoFlags, Operator::kNoProperties,
MachineType::AnyTagged(), result_size,
pass_context ? Linkage::kPassContext : Linkage::kNoContext);
CallPrologue();
- Node* return_value = raw_assembler()->CallN(desc, input_count, inputs);
+ Node* return_value =
+ raw_assembler()->CallN(call_descriptor, input_count, inputs);
CallEpilogue();
return return_value;
}
@@ -1079,14 +1126,14 @@ Node* CodeAssembler::TailCallStubImpl(const CallInterfaceDescriptor& descriptor,
TArgs... args) {
DCHECK_EQ(descriptor.GetParameterCount(), sizeof...(args));
size_t result_size = 1;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kNoFlags, Operator::kNoProperties,
MachineType::AnyTagged(), result_size);
Node* nodes[] = {target, args..., context};
CHECK_EQ(descriptor.GetParameterCount() + 2, arraysize(nodes));
- return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
+ return raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes);
}
// Instantiate TailCallStub() for argument counts used by CSA-generated code
@@ -1105,13 +1152,13 @@ Node* CodeAssembler::TailCallStubThenBytecodeDispatch(
int stack_parameter_count =
sizeof...(args) - descriptor.GetRegisterParameterCount();
DCHECK_LE(descriptor.GetStackParameterCount(), stack_parameter_count);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), descriptor, stack_parameter_count,
CallDescriptor::kNoFlags, Operator::kNoProperties,
MachineType::AnyTagged(), 0);
Node* nodes[] = {target, args..., context};
- return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
+ return raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes);
}
// Instantiate TailCallJSAndBytecodeDispatch() for argument counts used by
@@ -1127,12 +1174,12 @@ template <class... TArgs>
Node* CodeAssembler::TailCallBytecodeDispatch(
const CallInterfaceDescriptor& descriptor, Node* target, TArgs... args) {
DCHECK_EQ(descriptor.GetParameterCount(), sizeof...(args));
- CallDescriptor* desc = Linkage::GetBytecodeDispatchCallDescriptor(
+ auto call_descriptor = Linkage::GetBytecodeDispatchCallDescriptor(
isolate(), zone(), descriptor, descriptor.GetStackParameterCount());
Node* nodes[] = {target, args...};
CHECK_EQ(descriptor.GetParameterCount() + 1, arraysize(nodes));
- return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
+ return raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes);
}
// Instantiate TailCallBytecodeDispatch() for argument counts used by
@@ -1143,8 +1190,8 @@ template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallBytecodeDispatch(
Node* CodeAssembler::CallCFunctionN(Signature<MachineType>* signature,
int input_count, Node* const* inputs) {
- CallDescriptor* desc = Linkage::GetSimplifiedCDescriptor(zone(), signature);
- return raw_assembler()->CallN(desc, input_count, inputs);
+ auto call_descriptor = Linkage::GetSimplifiedCDescriptor(zone(), signature);
+ return raw_assembler()->CallN(call_descriptor, input_count, inputs);
}
Node* CodeAssembler::CallCFunction1(MachineType return_type,
@@ -1593,3 +1640,15 @@ Smi* CheckObjectType(Object* value, Smi* type, String* location) {
} // namespace internal
} // namespace v8
+
+#undef REPEAT_1_TO_2
+#undef REPEAT_1_TO_3
+#undef REPEAT_1_TO_4
+#undef REPEAT_1_TO_5
+#undef REPEAT_1_TO_6
+#undef REPEAT_1_TO_7
+#undef REPEAT_1_TO_8
+#undef REPEAT_1_TO_9
+#undef REPEAT_1_TO_10
+#undef REPEAT_1_TO_11
+#undef REPEAT_1_TO_12
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 9f0d463dc1..1d3abe74f0 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -31,6 +31,11 @@ class JSCollection;
class JSWeakCollection;
class JSWeakMap;
class JSWeakSet;
+class PromiseCapability;
+class PromiseFulfillReactionJobTask;
+class PromiseReaction;
+class PromiseReactionJobTask;
+class PromiseRejectReactionJobTask;
class Factory;
class Zone;
@@ -197,6 +202,7 @@ enum class ObjectType {
class AccessCheckNeeded;
class ClassBoilerplate;
+class BooleanWrapper;
class CompilationCacheTable;
class Constructor;
class Filler;
@@ -208,8 +214,11 @@ class JSSloppyArgumentsObject;
class MapCache;
class MutableHeapNumber;
class NativeContext;
+class NumberWrapper;
+class ScriptWrapper;
class SloppyArgumentsElements;
class StringWrapper;
+class SymbolWrapper;
class Undetectable;
class UniqueName;
class WasmMemoryObject;
@@ -404,6 +413,7 @@ class SloppyTNode : public TNode<T> {
V(IntPtrEqual, BoolT, WordT, WordT) \
V(Uint32LessThan, BoolT, Word32T, Word32T) \
V(Uint32LessThanOrEqual, BoolT, Word32T, Word32T) \
+ V(Uint32GreaterThan, BoolT, Word32T, Word32T) \
V(Uint32GreaterThanOrEqual, BoolT, Word32T, Word32T) \
V(UintPtrLessThan, BoolT, WordT, WordT) \
V(UintPtrLessThanOrEqual, BoolT, WordT, WordT) \
@@ -491,6 +501,7 @@ TNode<Float64T> Float64Add(TNode<Float64T> a, TNode<Float64T> b);
V(Float64RoundTruncate, Float64T, Float64T) \
V(Word32Clz, Int32T, Word32T) \
V(Word32Not, Word32T, Word32T) \
+ V(WordNot, WordT, WordT) \
V(Int32AbsWithOverflow, PAIR_TYPE(Int32T, BoolT), Int32T) \
V(Int64AbsWithOverflow, PAIR_TYPE(Int64T, BoolT), Int64T) \
V(IntPtrAbsWithOverflow, PAIR_TYPE(IntPtrT, BoolT), IntPtrT) \
@@ -543,7 +554,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Base Assembler
// ===========================================================================
- template <class PreviousType>
+ template <class PreviousType, bool FromTyped>
class CheckedNode {
public:
#ifdef DEBUG
@@ -561,6 +572,10 @@ class V8_EXPORT_PRIVATE CodeAssembler {
static_assert(std::is_convertible<TNode<A>, TNode<Object>>::value,
"Coercion to untagged values cannot be "
"checked.");
+ static_assert(
+ !FromTyped ||
+ !std::is_convertible<TNode<PreviousType>, TNode<A>>::value,
+ "Unnecessary CAST: types are convertible.");
#ifdef DEBUG
if (FLAG_debug_code) {
Node* function = code_assembler_->ExternalConstant(
@@ -610,13 +625,13 @@ class V8_EXPORT_PRIVATE CodeAssembler {
return TNode<T>::UncheckedCast(value);
}
- CheckedNode<Object> Cast(Node* value, const char* location) {
- return CheckedNode<Object>(value, this, location);
+ CheckedNode<Object, false> Cast(Node* value, const char* location) {
+ return {value, this, location};
}
template <class T>
- CheckedNode<T> Cast(TNode<T> value, const char* location) {
- return CheckedNode<T>(value, this, location);
+ CheckedNode<T, true> Cast(TNode<T> value, const char* location) {
+ return {value, this, location};
}
#ifdef DEBUG
@@ -628,6 +643,17 @@ class V8_EXPORT_PRIVATE CodeAssembler {
#define CAST(x) Cast(x, "")
#endif
+#ifdef V8_EMBEDDED_BUILTINS
+ // Off-heap builtins cannot embed constants within the code object itself,
+ // and thus need to load them from the root list.
+ bool ShouldLoadConstantsFromRootList() const {
+ return (isolate()->serializer_enabled() &&
+ isolate()->builtins_constants_table_builder() != nullptr);
+ }
+
+ TNode<HeapObject> LookupConstant(Handle<HeapObject> object);
+#endif
+
// Constants.
TNode<Int32T> Int32Constant(int32_t value);
TNode<Int64T> Int64Constant(int64_t value);
@@ -651,6 +677,12 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<ExternalReference> ExternalConstant(ExternalReference address);
TNode<Float64T> Float64Constant(double value);
TNode<HeapNumber> NaNConstant();
+ TNode<BoolT> Int32TrueConstant() {
+ return ReinterpretCast<BoolT>(Int32Constant(1));
+ }
+ TNode<BoolT> Int32FalseConstant() {
+ return ReinterpretCast<BoolT>(Int32Constant(0));
+ }
bool ToInt32Constant(Node* node, int32_t& out_value);
bool ToInt64Constant(Node* node, int64_t& out_value);
@@ -702,6 +734,9 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Access to the stack pointer
Node* LoadStackPointer();
+ // Poison mask for speculation.
+ Node* SpeculationPoison();
+
// Load raw memory location.
Node* Load(MachineType rep, Node* base);
template <class Type>
@@ -1136,23 +1171,17 @@ class TypedCodeAssemblerVariable : public CodeAssemblerVariable {
initial_value) {}
#endif // DEBUG
- template <class U, class = typename std::enable_if<
- std::is_convertible<TNode<T>, TNode<U>>::value>::type>
- operator TNode<U>() const {
- return TNode<T>::UncheckedCast(value());
+ TNode<T> value() const {
+ return TNode<T>::UncheckedCast(CodeAssemblerVariable::value());
}
- template <class U, class = typename std::enable_if<
- std::is_convertible<TNode<T>, TNode<U>>::value>::type>
- operator SloppyTNode<U>() const {
- return value();
- }
- operator Node*() const { return value(); }
void operator=(TNode<T> value) { Bind(value); }
+ void operator=(const TypedCodeAssemblerVariable<T>& variable) {
+ Bind(variable.value());
+ }
private:
using CodeAssemblerVariable::Bind;
- using CodeAssemblerVariable::value;
};
class CodeAssemblerLabel {
diff --git a/deps/v8/src/compiler/code-generator-impl.h b/deps/v8/src/compiler/code-generator-impl.h
index c6d3174d8c..1298657774 100644
--- a/deps/v8/src/compiler/code-generator-impl.h
+++ b/deps/v8/src/compiler/code-generator-impl.h
@@ -215,8 +215,13 @@ class OutOfLineCode : public ZoneObject {
OutOfLineCode* const next_;
};
+inline bool HasCallDescriptorFlag(Instruction* instr,
+ CallDescriptor::Flag flag) {
+ return MiscField::decode(instr->opcode()) & flag;
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_CODE_GENERATOR_IMPL_H
+#endif // V8_COMPILER_CODE_GENERATOR_IMPL_H_
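
HasCallDescriptorFlag only tests one CallDescriptor::Flag bit that the instruction selector packed into the opcode's MiscField. A minimal standalone sketch of that pack-and-test pattern with hand-rolled bit fields (the field layout is invented for illustration; V8 uses its BitField templates):

    #include <cstdint>
    #include <cstdio>

    // Made-up layout: low 16 bits = arch opcode, next 8 bits = "misc" flag bits.
    constexpr uint32_t kMiscShift = 16;
    constexpr uint32_t kMiscMask = 0xFFu << kMiscShift;

    enum Flag : uint32_t { kHasExceptionHandler = 1u << 0, kIsTailCall = 1u << 1 };

    constexpr uint32_t Encode(uint32_t arch_opcode, uint32_t flags) {
      return (arch_opcode & 0xFFFFu) | ((flags << kMiscShift) & kMiscMask);
    }

    constexpr bool HasFlag(uint32_t opcode, Flag flag) {
      // Same shape as HasCallDescriptorFlag: decode the misc field, AND with the flag.
      return (((opcode & kMiscMask) >> kMiscShift) & flag) != 0;
    }

    int main() {
      uint32_t opcode = Encode(/*arch_opcode=*/42, kHasExceptionHandler);
      std::printf("handler: %d, tail call: %d\n",
                  HasFlag(opcode, kHasExceptionHandler), HasFlag(opcode, kIsTailCall));
    }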
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index 0fb38e5933..71b0394eab 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -42,7 +42,8 @@ CodeGenerator::CodeGenerator(
InstructionSequence* code, CompilationInfo* info, Isolate* isolate,
base::Optional<OsrHelper> osr_helper, int start_source_position,
JumpOptimizationInfo* jump_opt,
- std::vector<trap_handler::ProtectedInstructionData>* protected_instructions)
+ std::vector<trap_handler::ProtectedInstructionData>* protected_instructions,
+ LoadPoisoning load_poisoning)
: zone_(codegen_zone),
isolate_(isolate),
frame_access_state_(nullptr),
@@ -63,6 +64,7 @@ CodeGenerator::CodeGenerator(
deoptimization_literals_(zone()),
inlined_function_count_(0),
translations_(zone()),
+ handler_table_offset_(0),
last_lazy_deopt_pc_(0),
caller_registers_saved_(false),
jump_tables_(nullptr),
@@ -72,7 +74,8 @@ CodeGenerator::CodeGenerator(
optimized_out_literal_id_(-1),
source_position_table_builder_(info->SourcePositionRecordingMode()),
protected_instructions_(protected_instructions),
- result_(kSuccess) {
+ result_(kSuccess),
+ load_poisoning_(load_poisoning) {
for (int i = 0; i < code->InstructionBlockCount(); ++i) {
new (&labels_[i]) Label;
}
@@ -148,16 +151,36 @@ void CodeGenerator::AssembleCode() {
ProfileEntryHookStub::MaybeCallEntryHookDelayed(tasm(), zone());
}
- // TODO(jupvfranco): This should be the first thing in the code,
- // or otherwise MaybeCallEntryHookDelayed may happen twice (for
- // optimized and deoptimized code).
- // We want to bailout only from JS functions, which are the only ones
+ // Check that {kJavaScriptCallCodeStartRegister} has been set correctly.
+ if (FLAG_debug_code && (info->code_kind() == Code::OPTIMIZED_FUNCTION ||
+ info->code_kind() == Code::BYTECODE_HANDLER)) {
+ tasm()->RecordComment("-- Prologue: check code start register --");
+ AssembleCodeStartRegisterCheck();
+ }
+
+ // TODO(jupvfranco): This should be the first thing in the code, otherwise
+ // MaybeCallEntryHookDelayed may happen twice (for optimized and deoptimized
+ // code). We want to bail out only from JS functions, which are the only ones
// that are optimized.
if (info->IsOptimizing()) {
DCHECK(linkage()->GetIncomingDescriptor()->IsJSFunctionCall());
+ tasm()->RecordComment("-- Prologue: check for deoptimization --");
BailoutIfDeoptimized();
}
+ // Initialize {kSpeculationPoisonRegister} either by comparing the expected
+ // with the actual call target, or by unconditionally using {-1} initially.
+ // Masking register arguments with it only makes sense in the first case.
+ if (info->is_generating_speculation_poison_on_entry()) {
+ tasm()->RecordComment("-- Prologue: generate speculation poison --");
+ GenerateSpeculationPoison();
+ if (info->is_poisoning_register_arguments()) {
+ AssembleRegisterArgumentPoisoning();
+ }
+ } else {
+ InitializePoisonForLoadsIfNeeded();
+ }
+
// Define deoptimization literals for all inlined functions.
DCHECK_EQ(0u, deoptimization_literals_.size());
for (CompilationInfo::InlinedFunctionHolder& inlined :
@@ -218,6 +241,9 @@ void CodeGenerator::AssembleCode() {
frame_access_state()->MarkHasFrame(block->needs_frame());
tasm()->bind(GetLabel(current_block_));
+
+ TryInsertBranchPoisoning(block);
+
if (block->must_construct_frame()) {
AssembleConstructFrame();
// We need to setup the root register after we assemble the prologue, to
@@ -287,26 +313,54 @@ void CodeGenerator::AssembleCode() {
unwinding_info_writer_.Finish(tasm()->pc_offset());
safepoints()->Emit(tasm(), frame()->GetTotalFrameSlotCount());
- result_ = kSuccess;
-}
-Handle<ByteArray> CodeGenerator::GetSourcePositionTable() {
- return source_position_table_builder_.ToSourcePositionTable(isolate());
-}
-
-MaybeHandle<HandlerTable> CodeGenerator::GetHandlerTable() const {
+ // Emit the exception handler table.
if (!handlers_.empty()) {
- Handle<HandlerTable> table =
- Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
- HandlerTable::LengthForReturn(static_cast<int>(handlers_.size())),
- TENURED));
+ handler_table_offset_ = HandlerTable::EmitReturnTableStart(
+ tasm(), static_cast<int>(handlers_.size()));
for (size_t i = 0; i < handlers_.size(); ++i) {
- table->SetReturnOffset(static_cast<int>(i), handlers_[i].pc_offset);
- table->SetReturnHandler(static_cast<int>(i), handlers_[i].handler->pos());
+ HandlerTable::EmitReturnEntry(tasm(), handlers_[i].pc_offset,
+ handlers_[i].handler->pos());
+ }
+ }
+
+ result_ = kSuccess;
+}
+
+void CodeGenerator::TryInsertBranchPoisoning(const InstructionBlock* block) {
+ // See if our predecessor was a basic block terminated by a branch_and_poison
+ // instruction. If yes, then perform the masking based on the flags.
+ if (block->PredecessorCount() != 1) return;
+ RpoNumber pred_rpo = (block->predecessors())[0];
+ const InstructionBlock* pred = code()->InstructionBlockAt(pred_rpo);
+ if (pred->code_start() == pred->code_end()) return;
+ Instruction* instr = code()->InstructionAt(pred->code_end() - 1);
+ FlagsMode mode = FlagsModeField::decode(instr->opcode());
+ switch (mode) {
+ case kFlags_branch_and_poison: {
+ BranchInfo branch;
+ RpoNumber target = ComputeBranchInfo(&branch, instr);
+ if (!target.IsValid()) {
+ // Non-trivial branch, add the masking code.
+ FlagsCondition condition = branch.condition;
+ if (branch.false_label == GetLabel(block->rpo_number())) {
+ condition = NegateFlagsCondition(condition);
+ }
+ AssembleBranchPoisoning(condition, instr);
+ }
+ break;
}
- return table;
+ case kFlags_deoptimize_and_poison: {
+ UNREACHABLE();
+ break;
+ }
+ default:
+ break;
}
- return {};
+}
+
+Handle<ByteArray> CodeGenerator::GetSourcePositionTable() {
+ return source_position_table_builder_.ToSourcePositionTable(isolate());
}
Handle<Code> CodeGenerator::FinalizeCode() {
@@ -315,18 +369,6 @@ Handle<Code> CodeGenerator::FinalizeCode() {
return Handle<Code>();
}
- // Allocate exception handler table.
- Handle<HandlerTable> table = HandlerTable::Empty(isolate());
- if (!handlers_.empty()) {
- table = Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
- HandlerTable::LengthForReturn(static_cast<int>(handlers_.size())),
- TENURED));
- for (size_t i = 0; i < handlers_.size(); ++i) {
- table->SetReturnOffset(static_cast<int>(i), handlers_[i].pc_offset);
- table->SetReturnHandler(static_cast<int>(i), handlers_[i].handler->pos());
- }
- }
-
// Allocate the source position table.
Handle<ByteArray> source_positions =
source_position_table_builder_.ToSourcePositionTable(isolate());
@@ -343,8 +385,9 @@ Handle<Code> CodeGenerator::FinalizeCode() {
Handle<Code> result = isolate()->factory()->NewCode(
desc, info()->code_kind(), Handle<Object>(), info()->builtin_index(),
- table, source_positions, deopt_data, kMovable, info()->stub_key(), true,
- frame()->GetTotalFrameSlotCount(), safepoints()->GetCodeOffset());
+ source_positions, deopt_data, kMovable, info()->stub_key(), true,
+ frame()->GetTotalFrameSlotCount(), safepoints()->GetCodeOffset(),
+ handler_table_offset_);
isolate()->counters()->total_compiled_code_size()->Increment(
result->instruction_size());
@@ -488,6 +531,77 @@ void CodeGenerator::GetPushCompatibleMoves(Instruction* instr,
pushes->resize(push_count);
}
+CodeGenerator::MoveType::Type CodeGenerator::MoveType::InferMove(
+ InstructionOperand* source, InstructionOperand* destination) {
+ if (source->IsConstant()) {
+ if (destination->IsAnyRegister()) {
+ return MoveType::kConstantToRegister;
+ } else {
+ DCHECK(destination->IsAnyStackSlot());
+ return MoveType::kConstantToStack;
+ }
+ }
+ DCHECK(LocationOperand::cast(source)->IsCompatible(
+ LocationOperand::cast(destination)));
+ if (source->IsAnyRegister()) {
+ if (destination->IsAnyRegister()) {
+ return MoveType::kRegisterToRegister;
+ } else {
+ DCHECK(destination->IsAnyStackSlot());
+ return MoveType::kRegisterToStack;
+ }
+ } else {
+ DCHECK(source->IsAnyStackSlot());
+ if (destination->IsAnyRegister()) {
+ return MoveType::kStackToRegister;
+ } else {
+ DCHECK(destination->IsAnyStackSlot());
+ return MoveType::kStackToStack;
+ }
+ }
+}
+
+CodeGenerator::MoveType::Type CodeGenerator::MoveType::InferSwap(
+ InstructionOperand* source, InstructionOperand* destination) {
+ DCHECK(LocationOperand::cast(source)->IsCompatible(
+ LocationOperand::cast(destination)));
+ if (source->IsAnyRegister()) {
+ if (destination->IsAnyRegister()) {
+ return MoveType::kRegisterToRegister;
+ } else {
+ DCHECK(destination->IsAnyStackSlot());
+ return MoveType::kRegisterToStack;
+ }
+ } else {
+ DCHECK(source->IsAnyStackSlot());
+ DCHECK(destination->IsAnyStackSlot());
+ return MoveType::kStackToStack;
+ }
+}
+
+RpoNumber CodeGenerator::ComputeBranchInfo(BranchInfo* branch,
+ Instruction* instr) {
+ // Assemble a branch after this instruction.
+ InstructionOperandConverter i(this, instr);
+ RpoNumber true_rpo = i.InputRpo(instr->InputCount() - 2);
+ RpoNumber false_rpo = i.InputRpo(instr->InputCount() - 1);
+
+ if (true_rpo == false_rpo) {
+ return true_rpo;
+ }
+ FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
+ if (IsNextInAssemblyOrder(true_rpo)) {
+ // true block is next, can fall through if condition negated.
+ std::swap(true_rpo, false_rpo);
+ condition = NegateFlagsCondition(condition);
+ }
+ branch->condition = condition;
+ branch->true_label = GetLabel(true_rpo);
+ branch->false_label = GetLabel(false_rpo);
+ branch->fallthru = IsNextInAssemblyOrder(false_rpo);
+ return RpoNumber::Invalid();
+}
+
CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
Instruction* instr, const InstructionBlock* block) {
int first_unused_stack_slot;
@@ -513,34 +627,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
switch (mode) {
- case kFlags_branch: {
- // Assemble a branch after this instruction.
- InstructionOperandConverter i(this, instr);
- RpoNumber true_rpo = i.InputRpo(instr->InputCount() - 2);
- RpoNumber false_rpo = i.InputRpo(instr->InputCount() - 1);
-
- if (true_rpo == false_rpo) {
+ case kFlags_branch:
+ case kFlags_branch_and_poison: {
+ BranchInfo branch;
+ RpoNumber target = ComputeBranchInfo(&branch, instr);
+ if (target.IsValid()) {
// redundant branch.
- if (!IsNextInAssemblyOrder(true_rpo)) {
- AssembleArchJump(true_rpo);
+ if (!IsNextInAssemblyOrder(target)) {
+ AssembleArchJump(target);
}
return kSuccess;
}
- if (IsNextInAssemblyOrder(true_rpo)) {
- // true block is next, can fall through if condition negated.
- std::swap(true_rpo, false_rpo);
- condition = NegateFlagsCondition(condition);
- }
- BranchInfo branch;
- branch.condition = condition;
- branch.true_label = GetLabel(true_rpo);
- branch.false_label = GetLabel(false_rpo);
- branch.fallthru = IsNextInAssemblyOrder(false_rpo);
// Assemble architecture-specific branch.
AssembleArchBranch(instr, &branch);
break;
}
- case kFlags_deoptimize: {
+ case kFlags_deoptimize:
+ case kFlags_deoptimize_and_poison: {
// Assemble a conditional eager deoptimization after this instruction.
InstructionOperandConverter i(this, instr);
size_t frame_state_offset = MiscField::decode(instr->opcode());
@@ -555,6 +658,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
// Assemble architecture-specific branch.
AssembleArchDeoptBranch(instr, &branch);
tasm()->bind(&continue_label);
+ if (mode == kFlags_deoptimize_and_poison) {
+ AssembleBranchPoisoning(NegateFlagsCondition(branch.condition), instr);
+ }
break;
}
case kFlags_set: {
@@ -570,6 +676,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
break;
}
}
+
+ // TODO(jarin) We should thread the flag through rather than set it.
+ if (instr->IsCall()) {
+ InitializePoisonForLoadsIfNeeded();
+ }
+
return kSuccess;
}
@@ -1078,6 +1190,12 @@ DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
return exit;
}
+void CodeGenerator::InitializePoisonForLoadsIfNeeded() {
+ if (load_poisoning_ == LoadPoisoning::kDoPoison) {
+ tasm()->ResetSpeculationPoisonRegister();
+ }
+}
+
OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
: frame_(gen->frame()), tasm_(gen->tasm()), next_(gen->ools_) {
gen->ools_ = this;
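
All of the poisoning plumbing added here reduces to one idea: keep a mask that is all-ones on the architecturally correct path, fold it to zero when the emitted branch condition disagrees with the path actually taken, and AND untrusted indices and loaded values with it. A standalone sketch of that idea, with an explicit flag standing in for misspeculation since that cannot be reproduced in portable C++ (names are illustrative only):

    #include <cstdint>
    #include <cstdio>

    int64_t MaskedLoad(const int64_t* table, uint64_t index, uint64_t poison) {
      // Both the index and the loaded value are ANDed with the mask, mirroring the
      // masking emitted after a branch_and_poison instruction.
      uint64_t safe_index = index & poison;
      return table[safe_index] & static_cast<int64_t>(poison);
    }

    int main() {
      const int64_t table[8] = {11, 22, 33, 44, 55, 66, 77, 88};

      // Correct path: the mask stays all-ones and the load behaves normally.
      uint64_t poison = ~uint64_t{0};
      std::printf("correct path:  %lld\n", (long long)MaskedLoad(table, 2, poison));

      // Misspeculated path (modelled explicitly): the branch condition failed, so
      // the mask is folded to zero and the wild index collapses to slot 0.
      poison = 0;
      std::printf("poisoned path: %lld\n", (long long)MaskedLoad(table, 100, poison));
    }

InitializePoisonForLoadsIfNeeded corresponds to resetting the mask back to all-ones at points where no branch outcome is known yet, i.e. at function entry and after calls.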
diff --git a/deps/v8/src/compiler/code-generator.h b/deps/v8/src/compiler/code-generator.h
index 425ea2ebf2..a91ae0212a 100644
--- a/deps/v8/src/compiler/code-generator.h
+++ b/deps/v8/src/compiler/code-generator.h
@@ -86,7 +86,8 @@ class CodeGenerator final : public GapResolver::Assembler {
int start_source_position,
JumpOptimizationInfo* jump_opt,
std::vector<trap_handler::ProtectedInstructionData>*
- protected_instructions);
+ protected_instructions,
+ LoadPoisoning load_poisoning);
// Generate native code. After calling AssembleCode, call FinalizeCode to
// produce the actual code object. If an error occurs during either phase,
@@ -95,7 +96,6 @@ class CodeGenerator final : public GapResolver::Assembler {
Handle<Code> FinalizeCode();
Handle<ByteArray> GetSourcePositionTable();
- MaybeHandle<HandlerTable> GetHandlerTable() const;
InstructionSequence* code() const { return code_; }
FrameAccessState* frame_access_state() const { return frame_access_state_; }
@@ -122,6 +122,7 @@ class CodeGenerator final : public GapResolver::Assembler {
Zone* zone() const { return zone_; }
TurboAssembler* tasm() { return &tasm_; }
size_t GetSafepointTableOffset() const { return safepoints_.GetCodeOffset(); }
+ size_t GetHandlerTableOffset() const { return handler_table_offset_; }
private:
GapResolver* resolver() { return &resolver_; }
@@ -150,11 +151,25 @@ class CodeGenerator final : public GapResolver::Assembler {
// Assemble instructions for the specified block.
CodeGenResult AssembleBlock(const InstructionBlock* block);
+ // Inserts a mask update at the beginning of an instruction block if the
+ // predecessor block ends with a masking branch.
+ void TryInsertBranchPoisoning(const InstructionBlock* block);
+
+ // Initializes the masking register.
+ // Eventually, this should always be threaded through from the caller
+ // (in the prologue) or from a callee (after a call).
+ void InitializePoisonForLoadsIfNeeded();
+
// Assemble code for the specified instruction.
CodeGenResult AssembleInstruction(Instruction* instr,
const InstructionBlock* block);
void AssembleGaps(Instruction* instr);
+ // Computes branch info for the given instruction. Returns a valid rpo number
+ // if the branch is redundant; in that case the returned rpo number points to
+ // the target basic block.
+ RpoNumber ComputeBranchInfo(BranchInfo* branch, Instruction* instr);
+
// Returns true if an instruction is a tail call that needs to adjust the stack
// pointer before execution. The stack slot index to the empty slot above the
// adjusted stack pointer is returned in |slot|.
@@ -179,12 +194,26 @@ class CodeGenerator final : public GapResolver::Assembler {
void AssembleArchLookupSwitch(Instruction* instr);
void AssembleArchTableSwitch(Instruction* instr);
+ // Generates code that checks whether the {kJavaScriptCallCodeStartRegister}
+ // contains the expected pointer to the start of the instruction stream.
+ void AssembleCodeStartRegisterCheck();
+
+ void AssembleBranchPoisoning(FlagsCondition condition, Instruction* instr);
+
// When entering code that is marked for deoptimization, rather than continuing
// with its execution, we jump to lazily compiled code. We need to do this
// because this code has already been deoptimized and needs to be unlinked
// from the JS functions referring to it.
void BailoutIfDeoptimized();
+ // Generates a mask which can be used to poison values when we detect
+ // the code is executing speculatively.
+ void GenerateSpeculationPoison();
+
+ // Generates code to poison the stack pointer and implicit register arguments
+ // like the context register and the function register.
+ void AssembleRegisterArgumentPoisoning();
+
// Generates an architecture-specific, descriptor-specific prologue
// to set up a stack frame.
void AssembleConstructFrame();
@@ -224,6 +253,26 @@ class CodeGenerator final : public GapResolver::Assembler {
PushTypeFlags push_type,
ZoneVector<MoveOperands*>* pushes);
+ class MoveType {
+ public:
+ enum Type {
+ kRegisterToRegister,
+ kRegisterToStack,
+ kStackToRegister,
+ kStackToStack,
+ kConstantToRegister,
+ kConstantToStack
+ };
+
+ // Detect what type of move or swap needs to be performed. Note that these
+ // functions do not take into account the representation (Tagged, FP,
+ // etc.).
+
+ static Type InferMove(InstructionOperand* source,
+ InstructionOperand* destination);
+ static Type InferSwap(InstructionOperand* source,
+ InstructionOperand* destination);
+ };
// Called before a tail call |instr|'s gap moves are assembled and allows
// gap-specific pre-processing, e.g. adjustment of the sp for tail calls that
// need it before gap moves or conversion of certain gap moves into pushes.
@@ -346,6 +395,7 @@ class CodeGenerator final : public GapResolver::Assembler {
ZoneDeque<DeoptimizationLiteral> deoptimization_literals_;
size_t inlined_function_count_;
TranslationBuffer translations_;
+ int handler_table_offset_;
int last_lazy_deopt_pc_;
// kArchCallCFunction could be reached either:
@@ -368,10 +418,11 @@ class CodeGenerator final : public GapResolver::Assembler {
SourcePositionTableBuilder source_position_table_builder_;
std::vector<trap_handler::ProtectedInstructionData>* protected_instructions_;
CodeGenResult result_;
+ LoadPoisoning load_poisoning_;
};
} // namespace compiler
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_CODE_GENERATOR_H
+#endif // V8_COMPILER_CODE_GENERATOR_H_
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index d9bc5c8173..2f4888617c 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -38,12 +38,14 @@ Decision DecideCondition(Node* const cond) {
CommonOperatorReducer::CommonOperatorReducer(Editor* editor, Graph* graph,
CommonOperatorBuilder* common,
- MachineOperatorBuilder* machine)
+ MachineOperatorBuilder* machine,
+ Zone* temp_zone)
: AdvancedReducer(editor),
graph_(graph),
common_(common),
machine_(machine),
- dead_(graph->NewNode(common->Dead())) {
+ dead_(graph->NewNode(common->Dead())),
+ zone_(temp_zone) {
NodeProperties::SetType(dead_, Type::None());
}
@@ -64,6 +66,8 @@ Reduction CommonOperatorReducer::Reduce(Node* node) {
return ReduceReturn(node);
case IrOpcode::kSelect:
return ReduceSelect(node);
+ case IrOpcode::kSwitch:
+ return ReduceSwitch(node);
default:
break;
}
@@ -138,10 +142,10 @@ Reduction CommonOperatorReducer::ReduceDeoptimizeConditional(Node* node) {
if (condition->opcode() == IrOpcode::kBooleanNot) {
NodeProperties::ReplaceValueInput(node, condition->InputAt(0), 0);
NodeProperties::ChangeOp(
- node, condition_is_true ? common()->DeoptimizeIf(p.kind(), p.reason(),
- VectorSlotPair())
- : common()->DeoptimizeUnless(
- p.kind(), p.reason(), VectorSlotPair()));
+ node,
+ condition_is_true
+ ? common()->DeoptimizeIf(p.kind(), p.reason(), p.feedback())
+ : common()->DeoptimizeUnless(p.kind(), p.reason(), p.feedback()));
return Changed(node);
}
Decision const decision = DecideCondition(condition);
@@ -150,8 +154,8 @@ Reduction CommonOperatorReducer::ReduceDeoptimizeConditional(Node* node) {
ReplaceWithValue(node, dead(), effect, control);
} else {
control = graph()->NewNode(
- common()->Deoptimize(p.kind(), p.reason(), VectorSlotPair()),
- frame_state, effect, control);
+ common()->Deoptimize(p.kind(), p.reason(), p.feedback()), frame_state,
+ effect, control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), control);
Revisit(graph()->end());
@@ -414,6 +418,42 @@ Reduction CommonOperatorReducer::ReduceSelect(Node* node) {
return NoChange();
}
+Reduction CommonOperatorReducer::ReduceSwitch(Node* node) {
+ DCHECK_EQ(IrOpcode::kSwitch, node->opcode());
+ Node* const switched_value = node->InputAt(0);
+ Node* const control = node->InputAt(1);
+
+ // Attempt to constant match the switched value against the IfValue cases. If
+ // no case matches, then use the IfDefault. We don't bother marking
+ // non-matching cases as dead code (same for an unused IfDefault), because the
+ // Switch itself will be marked as dead code.
+ Int32Matcher mswitched(switched_value);
+ if (mswitched.HasValue()) {
+ bool matched = false;
+
+ size_t const projection_count = node->op()->ControlOutputCount();
+ Node** projections = zone_->NewArray<Node*>(projection_count);
+ NodeProperties::CollectControlProjections(node, projections,
+ projection_count);
+ for (size_t i = 0; i < projection_count - 1; i++) {
+ Node* if_value = projections[i];
+ DCHECK_EQ(IrOpcode::kIfValue, if_value->opcode());
+ int32_t value_index = OpParameter<int32_t>(if_value->op());
+ if (value_index == mswitched.Value()) {
+ matched = true;
+ Replace(if_value, control);
+ break;
+ }
+ }
+ if (!matched) {
+ Node* if_default = projections[projection_count - 1];
+ DCHECK_EQ(IrOpcode::kIfDefault, if_default->opcode());
+ Replace(if_default, control);
+ }
+ return Replace(dead());
+ }
+ return NoChange();
+}
Reduction CommonOperatorReducer::Change(Node* node, Operator const* op,
Node* a) {
diff --git a/deps/v8/src/compiler/common-operator-reducer.h b/deps/v8/src/compiler/common-operator-reducer.h
index ea3575aa55..022c4fbe8c 100644
--- a/deps/v8/src/compiler/common-operator-reducer.h
+++ b/deps/v8/src/compiler/common-operator-reducer.h
@@ -26,7 +26,7 @@ class V8_EXPORT_PRIVATE CommonOperatorReducer final
public:
CommonOperatorReducer(Editor* editor, Graph* graph,
CommonOperatorBuilder* common,
- MachineOperatorBuilder* machine);
+ MachineOperatorBuilder* machine, Zone* temp_zone);
~CommonOperatorReducer() final {}
const char* reducer_name() const override { return "CommonOperatorReducer"; }
@@ -41,6 +41,7 @@ class V8_EXPORT_PRIVATE CommonOperatorReducer final
Reduction ReducePhi(Node* node);
Reduction ReduceReturn(Node* node);
Reduction ReduceSelect(Node* node);
+ Reduction ReduceSwitch(Node* node);
Reduction Change(Node* node, Operator const* op, Node* a);
Reduction Change(Node* node, Operator const* op, Node* a, Node* b);
@@ -54,6 +55,7 @@ class V8_EXPORT_PRIVATE CommonOperatorReducer final
CommonOperatorBuilder* const common_;
MachineOperatorBuilder* const machine_;
Node* const dead_;
+ Zone* zone_;
};
} // namespace compiler
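
ReduceSwitch, declared above, constant-folds a Switch whose input is a known Int32: the IfValue projection whose case value matches inherits the incoming control, the trailing IfDefault is used when nothing matches, and the Switch itself is replaced by Dead. A small standalone model of that selection step, using plain structs instead of graph nodes (names are illustrative only):

    #include <cstdio>
    #include <vector>

    struct Projection { bool is_default; int case_value; };

    // Returns the index of the projection that should inherit the incoming control
    // edge when the switched value is a compile-time constant.
    size_t FoldSwitch(const std::vector<Projection>& projections, int switched_value) {
      for (size_t i = 0; i + 1 < projections.size(); ++i) {
        if (projections[i].case_value == switched_value) return i;  // matching IfValue
      }
      return projections.size() - 1;  // otherwise the trailing IfDefault takes over
    }

    int main() {
      // The last projection is the IfDefault, mirroring CollectControlProjections.
      std::vector<Projection> projections = {{false, 0}, {false, 2}, {false, 5}, {true, 0}};
      std::printf("switch(2)  -> projection %zu\n", FoldSwitch(projections, 2));   // 1
      std::printf("switch(42) -> projection %zu\n", FoldSwitch(projections, 42));  // 3 (default)
    }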
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index 54af052d56..36b1caffa6 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -29,10 +29,27 @@ std::ostream& operator<<(std::ostream& os, BranchHint hint) {
UNREACHABLE();
}
+std::ostream& operator<<(std::ostream& os, IsSafetyCheck is_safety_check) {
+ switch (is_safety_check) {
+ case IsSafetyCheck::kSafetyCheck:
+ return os << "SafetyCheck";
+ case IsSafetyCheck::kNoSafetyCheck:
+ return os << "NoSafetyCheck";
+ }
+ UNREACHABLE();
+}
-BranchHint BranchHintOf(const Operator* const op) {
+std::ostream& operator<<(std::ostream& os, BranchOperatorInfo info) {
+ return os << info.hint << "|" << info.is_safety_check;
+}
+
+const BranchOperatorInfo& BranchOperatorInfoOf(const Operator* const op) {
DCHECK_EQ(IrOpcode::kBranch, op->opcode());
- return OpParameter<BranchHint>(op);
+ return OpParameter<BranchOperatorInfo>(op);
+}
+
+BranchHint BranchHintOf(const Operator* const op) {
+ return BranchOperatorInfoOf(op).hint;
}
int ValueInputCountOfReturn(Operator const* const op) {
@@ -44,7 +61,8 @@ int ValueInputCountOfReturn(Operator const* const op) {
bool operator==(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
return lhs.kind() == rhs.kind() && lhs.reason() == rhs.reason() &&
- lhs.feedback() == rhs.feedback();
+ lhs.feedback() == rhs.feedback() &&
+ lhs.is_safety_check() == rhs.is_safety_check();
}
bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
@@ -52,11 +70,12 @@ bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
}
size_t hash_value(DeoptimizeParameters p) {
- return base::hash_combine(p.kind(), p.reason(), p.feedback());
+ return base::hash_combine(p.kind(), p.reason(), p.feedback(),
+ p.is_safety_check());
}
std::ostream& operator<<(std::ostream& os, DeoptimizeParameters p) {
- os << p.kind() << ":" << p.reason();
+ os << p.kind() << ":" << p.reason() << ":" << p.is_safety_check();
if (p.feedback().IsValid()) {
os << "; " << p.feedback();
}
@@ -70,6 +89,32 @@ DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const op) {
return OpParameter<DeoptimizeParameters>(op);
}
+IsSafetyCheck IsSafetyCheckOf(const Operator* op) {
+ if (op->opcode() == IrOpcode::kBranch) {
+ return BranchOperatorInfoOf(op).is_safety_check;
+ }
+ return DeoptimizeParametersOf(op).is_safety_check();
+}
+
+const Operator* CommonOperatorBuilder::MarkAsSafetyCheck(const Operator* op) {
+ if (op->opcode() == IrOpcode::kBranch) {
+ BranchOperatorInfo info = BranchOperatorInfoOf(op);
+ if (info.is_safety_check == IsSafetyCheck::kSafetyCheck) return op;
+ return Branch(info.hint, IsSafetyCheck::kSafetyCheck);
+ }
+ DeoptimizeParameters p = DeoptimizeParametersOf(op);
+ if (p.is_safety_check() == IsSafetyCheck::kSafetyCheck) return op;
+ switch (op->opcode()) {
+ case IrOpcode::kDeoptimizeIf:
+ return DeoptimizeIf(p.kind(), p.reason(), p.feedback(),
+ IsSafetyCheck::kSafetyCheck);
+ case IrOpcode::kDeoptimizeUnless:
+ return DeoptimizeUnless(p.kind(), p.reason(), p.feedback(),
+ IsSafetyCheck::kSafetyCheck);
+ default:
+ UNREACHABLE();
+ }
+}
bool operator==(SelectParameters const& lhs, SelectParameters const& rhs) {
return lhs.representation() == rhs.representation() &&
@@ -175,7 +220,7 @@ bool operator!=(RelocatablePtrConstantInfo const& lhs,
}
size_t hash_value(RelocatablePtrConstantInfo const& p) {
- return base::hash_combine(p.value(), p.rmode(), p.type());
+ return base::hash_combine(p.value(), int8_t{p.rmode()}, p.type());
}
std::ostream& operator<<(std::ostream& os,
@@ -365,6 +410,14 @@ ZoneVector<MachineType> const* MachineTypesOf(Operator const* op) {
V(FinishRegion, Operator::kKontrol, 1, 1, 0, 1, 1, 0) \
V(Retain, Operator::kKontrol, 1, 1, 0, 0, 1, 0)
+#define CACHED_BRANCH_LIST(V) \
+ V(None, SafetyCheck) \
+ V(True, SafetyCheck) \
+ V(False, SafetyCheck) \
+ V(None, NoSafetyCheck) \
+ V(True, NoSafetyCheck) \
+ V(False, NoSafetyCheck)
+
#define CACHED_RETURN_LIST(V) \
V(1) \
V(2) \
@@ -417,22 +470,28 @@ ZoneVector<MachineType> const* MachineTypesOf(Operator const* op) {
V(Soft, InsufficientTypeFeedbackForGenericKeyedAccess) \
V(Soft, InsufficientTypeFeedbackForGenericNamedAccess)
-#define CACHED_DEOPTIMIZE_IF_LIST(V) \
- V(Eager, DivisionByZero) \
- V(Eager, Hole) \
- V(Eager, MinusZero) \
- V(Eager, Overflow) \
- V(Eager, Smi)
-
-#define CACHED_DEOPTIMIZE_UNLESS_LIST(V) \
- V(Eager, LostPrecision) \
- V(Eager, LostPrecisionOrNaN) \
- V(Eager, NotAHeapNumber) \
- V(Eager, NotANumberOrOddball) \
- V(Eager, NotASmi) \
- V(Eager, OutOfBounds) \
- V(Eager, WrongInstanceType) \
- V(Eager, WrongMap)
+#define CACHED_DEOPTIMIZE_IF_LIST(V) \
+ V(Eager, DivisionByZero, NoSafetyCheck) \
+ V(Eager, DivisionByZero, SafetyCheck) \
+ V(Eager, Hole, NoSafetyCheck) \
+ V(Eager, Hole, SafetyCheck) \
+ V(Eager, MinusZero, NoSafetyCheck) \
+ V(Eager, MinusZero, SafetyCheck) \
+ V(Eager, Overflow, NoSafetyCheck) \
+ V(Eager, Overflow, SafetyCheck) \
+ V(Eager, Smi, SafetyCheck)
+
+#define CACHED_DEOPTIMIZE_UNLESS_LIST(V) \
+ V(Eager, LostPrecision, NoSafetyCheck) \
+ V(Eager, LostPrecision, SafetyCheck) \
+ V(Eager, LostPrecisionOrNaN, NoSafetyCheck) \
+ V(Eager, LostPrecisionOrNaN, SafetyCheck) \
+ V(Eager, NotAHeapNumber, SafetyCheck) \
+ V(Eager, NotANumberOrOddball, SafetyCheck) \
+ V(Eager, NotASmi, SafetyCheck) \
+ V(Eager, OutOfBounds, SafetyCheck) \
+ V(Eager, WrongInstanceType, SafetyCheck) \
+ V(Eager, WrongMap, SafetyCheck)
#define CACHED_TRAP_IF_LIST(V) \
V(TrapDivUnrepresentable) \
@@ -534,18 +593,20 @@ struct CommonOperatorGlobalCache final {
CACHED_RETURN_LIST(CACHED_RETURN)
#undef CACHED_RETURN
- template <BranchHint kBranchHint>
- struct BranchOperator final : public Operator1<BranchHint> {
+ template <BranchHint hint, IsSafetyCheck is_safety_check>
+ struct BranchOperator final : public Operator1<BranchOperatorInfo> {
BranchOperator()
- : Operator1<BranchHint>( // --
- IrOpcode::kBranch, Operator::kKontrol, // opcode
- "Branch", // name
- 1, 0, 1, 0, 0, 2, // counts
- kBranchHint) {} // parameter
+ : Operator1<BranchOperatorInfo>( // --
+ IrOpcode::kBranch, Operator::kKontrol, // opcode
+ "Branch", // name
+ 1, 0, 1, 0, 0, 2, // counts
+ BranchOperatorInfo{hint, is_safety_check}) {} // parameter
};
- BranchOperator<BranchHint::kNone> kBranchNoneOperator;
- BranchOperator<BranchHint::kTrue> kBranchTrueOperator;
- BranchOperator<BranchHint::kFalse> kBranchFalseOperator;
+#define CACHED_BRANCH(Hint, IsCheck) \
+ BranchOperator<BranchHint::k##Hint, IsSafetyCheck::k##IsCheck> \
+ kBranch##Hint##IsCheck##Operator;
+ CACHED_BRANCH_LIST(CACHED_BRANCH)
+#undef CACHED_BRANCH
template <int kEffectInputCount>
struct EffectPhiOperator final : public Operator {
@@ -608,7 +669,8 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"Deoptimize", // name
1, 1, 1, 0, 0, 1, // counts
- DeoptimizeParameters(kKind, kReason, VectorSlotPair())) {}
+ DeoptimizeParameters(kKind, kReason, VectorSlotPair(),
+ IsSafetyCheck::kNoSafetyCheck)) {}
};
#define CACHED_DEOPTIMIZE(Kind, Reason) \
DeoptimizeOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason> \
@@ -616,7 +678,8 @@ struct CommonOperatorGlobalCache final {
CACHED_DEOPTIMIZE_LIST(CACHED_DEOPTIMIZE)
#undef CACHED_DEOPTIMIZE
- template <DeoptimizeKind kKind, DeoptimizeReason kReason>
+ template <DeoptimizeKind kKind, DeoptimizeReason kReason,
+ IsSafetyCheck is_safety_check>
struct DeoptimizeIfOperator final : public Operator1<DeoptimizeParameters> {
DeoptimizeIfOperator()
: Operator1<DeoptimizeParameters>( // --
@@ -624,15 +687,18 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"DeoptimizeIf", // name
2, 1, 1, 0, 1, 1, // counts
- DeoptimizeParameters(kKind, kReason, VectorSlotPair())) {}
+ DeoptimizeParameters(kKind, kReason, VectorSlotPair(),
+ is_safety_check)) {}
};
-#define CACHED_DEOPTIMIZE_IF(Kind, Reason) \
- DeoptimizeIfOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason> \
- kDeoptimizeIf##Kind##Reason##Operator;
+#define CACHED_DEOPTIMIZE_IF(Kind, Reason, IsCheck) \
+ DeoptimizeIfOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason, \
+ IsSafetyCheck::k##IsCheck> \
+ kDeoptimizeIf##Kind##Reason##IsCheck##Operator;
CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
#undef CACHED_DEOPTIMIZE_IF
- template <DeoptimizeKind kKind, DeoptimizeReason kReason>
+ template <DeoptimizeKind kKind, DeoptimizeReason kReason,
+ IsSafetyCheck is_safety_check>
struct DeoptimizeUnlessOperator final
: public Operator1<DeoptimizeParameters> {
DeoptimizeUnlessOperator()
@@ -641,12 +707,14 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"DeoptimizeUnless", // name
2, 1, 1, 0, 1, 1, // counts
- DeoptimizeParameters(kKind, kReason, VectorSlotPair())) {}
+ DeoptimizeParameters(kKind, kReason, VectorSlotPair(),
+ is_safety_check)) {}
};
-#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason) \
+#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason, IsCheck) \
DeoptimizeUnlessOperator<DeoptimizeKind::k##Kind, \
- DeoptimizeReason::k##Reason> \
- kDeoptimizeUnless##Kind##Reason##Operator;
+ DeoptimizeReason::k##Reason, \
+ IsSafetyCheck::k##IsCheck> \
+ kDeoptimizeUnless##Kind##Reason##IsCheck##Operator;
CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
#undef CACHED_DEOPTIMIZE_UNLESS
@@ -806,16 +874,15 @@ const Operator* CommonOperatorBuilder::Return(int value_input_count) {
value_input_count + 1, 1, 1, 0, 0, 1); // counts
}
-
-const Operator* CommonOperatorBuilder::Branch(BranchHint hint) {
- switch (hint) {
- case BranchHint::kNone:
- return &cache_.kBranchNoneOperator;
- case BranchHint::kTrue:
- return &cache_.kBranchTrueOperator;
- case BranchHint::kFalse:
- return &cache_.kBranchFalseOperator;
+const Operator* CommonOperatorBuilder::Branch(BranchHint hint,
+ IsSafetyCheck is_safety_check) {
+#define CACHED_BRANCH(Hint, IsCheck) \
+ if (hint == BranchHint::k##Hint && \
+ is_safety_check == IsSafetyCheck::k##IsCheck) { \
+ return &cache_.kBranch##Hint##IsCheck##Operator; \
}
+ CACHED_BRANCH_LIST(CACHED_BRANCH)
+#undef CACHED_BRANCH
UNREACHABLE();
}
@@ -830,7 +897,8 @@ const Operator* CommonOperatorBuilder::Deoptimize(
CACHED_DEOPTIMIZE_LIST(CACHED_DEOPTIMIZE)
#undef CACHED_DEOPTIMIZE
// Uncached
- DeoptimizeParameters parameter(kind, reason, feedback);
+ DeoptimizeParameters parameter(kind, reason, feedback,
+ IsSafetyCheck::kNoSafetyCheck);
return new (zone()) Operator1<DeoptimizeParameters>( // --
IrOpcode::kDeoptimize, // opcodes
Operator::kFoldable | Operator::kNoThrow, // properties
@@ -841,16 +909,17 @@ const Operator* CommonOperatorBuilder::Deoptimize(
const Operator* CommonOperatorBuilder::DeoptimizeIf(
DeoptimizeKind kind, DeoptimizeReason reason,
- VectorSlotPair const& feedback) {
-#define CACHED_DEOPTIMIZE_IF(Kind, Reason) \
- if (kind == DeoptimizeKind::k##Kind && \
- reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
- return &cache_.kDeoptimizeIf##Kind##Reason##Operator; \
+ VectorSlotPair const& feedback, IsSafetyCheck is_safety_check) {
+#define CACHED_DEOPTIMIZE_IF(Kind, Reason, IsCheck) \
+ if (kind == DeoptimizeKind::k##Kind && \
+ reason == DeoptimizeReason::k##Reason && \
+ is_safety_check == IsSafetyCheck::k##IsCheck && !feedback.IsValid()) { \
+ return &cache_.kDeoptimizeIf##Kind##Reason##IsCheck##Operator; \
}
CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
#undef CACHED_DEOPTIMIZE_IF
// Uncached
- DeoptimizeParameters parameter(kind, reason, feedback);
+ DeoptimizeParameters parameter(kind, reason, feedback, is_safety_check);
return new (zone()) Operator1<DeoptimizeParameters>( // --
IrOpcode::kDeoptimizeIf, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
@@ -861,16 +930,17 @@ const Operator* CommonOperatorBuilder::DeoptimizeIf(
const Operator* CommonOperatorBuilder::DeoptimizeUnless(
DeoptimizeKind kind, DeoptimizeReason reason,
- VectorSlotPair const& feedback) {
-#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason) \
- if (kind == DeoptimizeKind::k##Kind && \
- reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
- return &cache_.kDeoptimizeUnless##Kind##Reason##Operator; \
+ VectorSlotPair const& feedback, IsSafetyCheck is_safety_check) {
+#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason, IsCheck) \
+ if (kind == DeoptimizeKind::k##Kind && \
+ reason == DeoptimizeReason::k##Reason && \
+ is_safety_check == IsSafetyCheck::k##IsCheck && !feedback.IsValid()) { \
+ return &cache_.kDeoptimizeUnless##Kind##Reason##IsCheck##Operator; \
}
CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
#undef CACHED_DEOPTIMIZE_UNLESS
// Uncached
- DeoptimizeParameters parameter(kind, reason, feedback);
+ DeoptimizeParameters parameter(kind, reason, feedback, is_safety_check);
return new (zone()) Operator1<DeoptimizeParameters>( // --
IrOpcode::kDeoptimizeUnless, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
@@ -1299,65 +1369,70 @@ const Operator* CommonOperatorBuilder::FrameState(
state_info); // parameter
}
-
-const Operator* CommonOperatorBuilder::Call(const CallDescriptor* descriptor) {
+const Operator* CommonOperatorBuilder::Call(
+ const CallDescriptor* call_descriptor) {
class CallOperator final : public Operator1<const CallDescriptor*> {
public:
- explicit CallOperator(const CallDescriptor* descriptor)
+ explicit CallOperator(const CallDescriptor* call_descriptor)
: Operator1<const CallDescriptor*>(
- IrOpcode::kCall, descriptor->properties(), "Call",
- descriptor->InputCount() + descriptor->FrameStateCount(),
- Operator::ZeroIfPure(descriptor->properties()),
- Operator::ZeroIfEliminatable(descriptor->properties()),
- descriptor->ReturnCount(),
- Operator::ZeroIfPure(descriptor->properties()),
- Operator::ZeroIfNoThrow(descriptor->properties()), descriptor) {}
+ IrOpcode::kCall, call_descriptor->properties(), "Call",
+ call_descriptor->InputCount() +
+ call_descriptor->FrameStateCount(),
+ Operator::ZeroIfPure(call_descriptor->properties()),
+ Operator::ZeroIfEliminatable(call_descriptor->properties()),
+ call_descriptor->ReturnCount(),
+ Operator::ZeroIfPure(call_descriptor->properties()),
+ Operator::ZeroIfNoThrow(call_descriptor->properties()),
+ call_descriptor) {}
void PrintParameter(std::ostream& os, PrintVerbosity verbose) const {
os << "[" << *parameter() << "]";
}
};
- return new (zone()) CallOperator(descriptor);
+ return new (zone()) CallOperator(call_descriptor);
}
const Operator* CommonOperatorBuilder::CallWithCallerSavedRegisters(
- const CallDescriptor* descriptor) {
+ const CallDescriptor* call_descriptor) {
class CallOperator final : public Operator1<const CallDescriptor*> {
public:
- explicit CallOperator(const CallDescriptor* descriptor)
+ explicit CallOperator(const CallDescriptor* call_descriptor)
: Operator1<const CallDescriptor*>(
- IrOpcode::kCallWithCallerSavedRegisters, descriptor->properties(),
- "CallWithCallerSavedRegisters",
- descriptor->InputCount() + descriptor->FrameStateCount(),
- Operator::ZeroIfPure(descriptor->properties()),
- Operator::ZeroIfEliminatable(descriptor->properties()),
- descriptor->ReturnCount(),
- Operator::ZeroIfPure(descriptor->properties()),
- Operator::ZeroIfNoThrow(descriptor->properties()), descriptor) {}
+ IrOpcode::kCallWithCallerSavedRegisters,
+ call_descriptor->properties(), "CallWithCallerSavedRegisters",
+ call_descriptor->InputCount() +
+ call_descriptor->FrameStateCount(),
+ Operator::ZeroIfPure(call_descriptor->properties()),
+ Operator::ZeroIfEliminatable(call_descriptor->properties()),
+ call_descriptor->ReturnCount(),
+ Operator::ZeroIfPure(call_descriptor->properties()),
+ Operator::ZeroIfNoThrow(call_descriptor->properties()),
+ call_descriptor) {}
void PrintParameter(std::ostream& os, PrintVerbosity verbose) const {
os << "[" << *parameter() << "]";
}
};
- return new (zone()) CallOperator(descriptor);
+ return new (zone()) CallOperator(call_descriptor);
}
const Operator* CommonOperatorBuilder::TailCall(
- const CallDescriptor* descriptor) {
+ const CallDescriptor* call_descriptor) {
class TailCallOperator final : public Operator1<const CallDescriptor*> {
public:
- explicit TailCallOperator(const CallDescriptor* descriptor)
+ explicit TailCallOperator(const CallDescriptor* call_descriptor)
: Operator1<const CallDescriptor*>(
IrOpcode::kTailCall,
- descriptor->properties() | Operator::kNoThrow, "TailCall",
- descriptor->InputCount() + descriptor->FrameStateCount(), 1, 1, 0,
- 0, 1, descriptor) {}
+ call_descriptor->properties() | Operator::kNoThrow, "TailCall",
+ call_descriptor->InputCount() +
+ call_descriptor->FrameStateCount(),
+ 1, 1, 0, 0, 1, call_descriptor) {}
void PrintParameter(std::ostream& os, PrintVerbosity verbose) const {
os << "[" << *parameter() << "]";
}
};
- return new (zone()) TailCallOperator(descriptor);
+ return new (zone()) TailCallOperator(call_descriptor);
}
const Operator* CommonOperatorBuilder::Projection(size_t index) {
@@ -1412,6 +1487,7 @@ const Operator* CommonOperatorBuilder::DeadValue(MachineRepresentation rep) {
}
#undef COMMON_CACHED_OP_LIST
+#undef CACHED_BRANCH_LIST
#undef CACHED_RETURN_LIST
#undef CACHED_END_LIST
#undef CACHED_EFFECT_PHI_LIST
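The CACHED_* lists touched above are X-macros: every list entry instantiates a template whose non-type parameters (deoptimize kind, reason, and now the IsSafetyCheck flag) are fixed at compile time, so the common parameter combinations live in statically allocated operators and the builder can return a pointer without touching the zone. Below is a minimal standalone sketch of that caching pattern; the names (Kind, CachedOperator, Lookup) and the reduced parameter set are illustrative assumptions, not V8's real types.

#include <cassert>

// Illustrative stand-ins for the real parameter enums.
enum class Kind { kEager, kSoft };
enum class IsSafetyCheck { kSafetyCheck, kNoSafetyCheck };

struct Operator {
  Kind kind;
  IsSafetyCheck is_safety_check;
};

// One instantiation per X-macro entry; the parameters are baked in as
// template arguments, so each cached operator can live in static storage.
template <Kind kKind, IsSafetyCheck kCheck>
struct CachedOperator {
  Operator op{kKind, kCheck};
};

#define CACHED_LIST(V)    \
  V(Eager, SafetyCheck)   \
  V(Eager, NoSafetyCheck) \
  V(Soft, NoSafetyCheck)

struct OperatorCache {
#define CACHED(KindName, CheckName)                              \
  CachedOperator<Kind::k##KindName, IsSafetyCheck::k##CheckName> \
      k##KindName##CheckName##Operator;
  CACHED_LIST(CACHED)
#undef CACHED
};

// Mirrors the CACHED_* lookup macros in common-operator.cc: return the
// statically cached instance when the requested parameters match one of the
// pre-built combinations, otherwise signal "uncached" (zone-allocated in V8).
const Operator* Lookup(const OperatorCache& cache, Kind kind,
                       IsSafetyCheck check) {
#define CACHED(KindName, CheckName)                    \
  if (kind == Kind::k##KindName &&                     \
      check == IsSafetyCheck::k##CheckName) {          \
    return &cache.k##KindName##CheckName##Operator.op; \
  }
  CACHED_LIST(CACHED)
#undef CACHED
  return nullptr;
}

int main() {
  OperatorCache cache;
  const Operator* op =
      Lookup(cache, Kind::kEager, IsSafetyCheck::kNoSafetyCheck);
  assert(op != nullptr && op->is_safety_check == IsSafetyCheck::kNoSafetyCheck);
  assert(Lookup(cache, Kind::kSoft, IsSafetyCheck::kSafetyCheck) == nullptr);
  return 0;
}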
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 0e0614dced..b753ed88dc 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -45,6 +45,31 @@ inline size_t hash_value(BranchHint hint) { return static_cast<size_t>(hint); }
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, BranchHint);
+enum class IsSafetyCheck : uint8_t { kSafetyCheck, kNoSafetyCheck };
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, IsSafetyCheck);
+inline size_t hash_value(IsSafetyCheck is_safety_check) {
+ return static_cast<size_t>(is_safety_check);
+}
+
+struct BranchOperatorInfo {
+ BranchHint hint;
+ IsSafetyCheck is_safety_check;
+};
+
+inline size_t hash_value(const BranchOperatorInfo& info) {
+ return base::hash_combine(info.hint, info.is_safety_check);
+}
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, BranchOperatorInfo);
+
+inline bool operator==(const BranchOperatorInfo& a,
+ const BranchOperatorInfo& b) {
+ return a.hint == b.hint && a.is_safety_check == b.is_safety_check;
+}
+
+V8_EXPORT_PRIVATE const BranchOperatorInfo& BranchOperatorInfoOf(
+ const Operator* const);
V8_EXPORT_PRIVATE BranchHint BranchHintOf(const Operator* const);
// Helper function for return nodes, because returns have a hidden value input.
@@ -54,17 +79,23 @@ int ValueInputCountOfReturn(Operator const* const op);
class DeoptimizeParameters final {
public:
DeoptimizeParameters(DeoptimizeKind kind, DeoptimizeReason reason,
- VectorSlotPair const& feedback)
- : kind_(kind), reason_(reason), feedback_(feedback) {}
+ VectorSlotPair const& feedback,
+ IsSafetyCheck is_safety_check)
+ : kind_(kind),
+ reason_(reason),
+ feedback_(feedback),
+ is_safety_check_(is_safety_check) {}
DeoptimizeKind kind() const { return kind_; }
DeoptimizeReason reason() const { return reason_; }
const VectorSlotPair& feedback() const { return feedback_; }
+ IsSafetyCheck is_safety_check() const { return is_safety_check_; }
private:
DeoptimizeKind const kind_;
DeoptimizeReason const reason_;
VectorSlotPair const feedback_;
+ IsSafetyCheck is_safety_check_;
};
bool operator==(DeoptimizeParameters, DeoptimizeParameters);
@@ -76,6 +107,7 @@ std::ostream& operator<<(std::ostream&, DeoptimizeParameters p);
DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const);
+IsSafetyCheck IsSafetyCheckOf(const Operator* op);
class SelectParameters final {
public:
@@ -355,7 +387,9 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* DeadValue(MachineRepresentation rep);
const Operator* Unreachable();
const Operator* End(size_t control_input_count);
- const Operator* Branch(BranchHint = BranchHint::kNone);
+ const Operator* Branch(
+ BranchHint = BranchHint::kNone,
+ IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
const Operator* IfTrue();
const Operator* IfFalse();
const Operator* IfSuccess();
@@ -366,10 +400,14 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* Throw();
const Operator* Deoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
VectorSlotPair const& feedback);
- const Operator* DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason,
- VectorSlotPair const& feedback);
- const Operator* DeoptimizeUnless(DeoptimizeKind kind, DeoptimizeReason reason,
- VectorSlotPair const& feedback);
+ const Operator* DeoptimizeIf(
+ DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback,
+ IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
+ const Operator* DeoptimizeUnless(
+ DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback,
+ IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
const Operator* TrapIf(int32_t trap_id);
const Operator* TrapUnless(int32_t trap_id);
const Operator* Return(int value_input_count = 1);
@@ -421,10 +459,10 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* FrameState(BailoutId bailout_id,
OutputFrameStateCombine state_combine,
const FrameStateFunctionInfo* function_info);
- const Operator* Call(const CallDescriptor* descriptor);
+ const Operator* Call(const CallDescriptor* call_descriptor);
const Operator* CallWithCallerSavedRegisters(
- const CallDescriptor* descriptor);
- const Operator* TailCall(const CallDescriptor* descriptor);
+ const CallDescriptor* call_descriptor);
+ const Operator* TailCall(const CallDescriptor* call_descriptor);
const Operator* Projection(size_t index);
const Operator* Retain();
const Operator* TypeGuard(Type* type);
@@ -438,6 +476,8 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
FrameStateType type, int parameter_count, int local_count,
Handle<SharedFunctionInfo> shared_info);
+ const Operator* MarkAsSafetyCheck(const Operator* op);
+
private:
Zone* zone() const { return zone_; }
@@ -447,6 +487,10 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
DISALLOW_COPY_AND_ASSIGN(CommonOperatorBuilder);
};
+// This should go into some common compiler header, but we do not have such a
+// thing at the moment.
+enum class LoadPoisoning { kDoPoison, kDontPoison };
+
} // namespace compiler
} // namespace internal
} // namespace v8
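The BranchOperatorInfo block added to common-operator.h follows V8's usual pattern for operator parameters: a plain struct together with free operator== and hash_value overloads, so the parameter can be hashed and compared wherever operators are cached or tested for equivalence. The sketch below is a rough standalone analogue; std::hash and std::unordered_map stand in for base::hash_combine and the operator cache, and the string values are placeholders, so treat it as an illustration of the convention rather than V8 code.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <string>
#include <unordered_map>

enum class BranchHint : uint8_t { kNone, kTrue, kFalse };
enum class IsSafetyCheck : uint8_t { kSafetyCheck, kNoSafetyCheck };

struct BranchOperatorInfo {
  BranchHint hint;
  IsSafetyCheck is_safety_check;
};

inline bool operator==(const BranchOperatorInfo& a,
                       const BranchOperatorInfo& b) {
  return a.hint == b.hint && a.is_safety_check == b.is_safety_check;
}

// Stand-in for base::hash_combine: fold both fields into one hash value.
struct BranchOperatorInfoHash {
  size_t operator()(const BranchOperatorInfo& info) const {
    size_t h1 = std::hash<uint8_t>()(static_cast<uint8_t>(info.hint));
    size_t h2 =
        std::hash<uint8_t>()(static_cast<uint8_t>(info.is_safety_check));
    return h1 ^ (h2 + 0x9e3779b9 + (h1 << 6) + (h1 >> 2));
  }
};

int main() {
  // With equality and hashing defined, the parameter struct can key a cache
  // indexed by (hint, is_safety_check); in V8 the values would be Operator1
  // instances rather than strings.
  std::unordered_map<BranchOperatorInfo, std::string, BranchOperatorInfoHash>
      cache;
  cache[{BranchHint::kTrue, IsSafetyCheck::kSafetyCheck}] =
      "Branch[True|SafetyCheck]";
  assert(cache.count({BranchHint::kTrue, IsSafetyCheck::kSafetyCheck}) == 1);
  assert(cache.count({BranchHint::kFalse, IsSafetyCheck::kNoSafetyCheck}) == 0);
  return 0;
}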
diff --git a/deps/v8/src/compiler/dead-code-elimination.cc b/deps/v8/src/compiler/dead-code-elimination.cc
index 523d37fe29..6c54f2b036 100644
--- a/deps/v8/src/compiler/dead-code-elimination.cc
+++ b/deps/v8/src/compiler/dead-code-elimination.cc
@@ -139,18 +139,26 @@ Reduction DeadCodeElimination::ReduceLoopOrMerge(Node* node) {
if (live_input_count == 0) {
return Replace(dead());
} else if (live_input_count == 1) {
+ NodeVector loop_exits(zone_);
// Due to compaction above, the live input is at offset 0.
for (Node* const use : node->uses()) {
if (NodeProperties::IsPhi(use)) {
Replace(use, use->InputAt(0));
} else if (use->opcode() == IrOpcode::kLoopExit &&
use->InputAt(1) == node) {
- RemoveLoopExit(use);
+ // Remember the loop exits so that we can mark their loop input dead.
+ // This has to be done after the use list iteration so that we do
+ // not mutate the use list while it is being iterated.
+ loop_exits.push_back(use);
} else if (use->opcode() == IrOpcode::kTerminate) {
DCHECK_EQ(IrOpcode::kLoop, node->opcode());
Replace(use, dead());
}
}
+ for (Node* loop_exit : loop_exits) {
+ loop_exit->ReplaceInput(1, dead());
+ Revisit(loop_exit);
+ }
return Replace(node->InputAt(0));
}
DCHECK_LE(2, live_input_count);
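The comment added to ReduceLoopOrMerge spells out the constraint behind the new loop_exits vector: RemoveLoopExit would edit the very use list the surrounding for-loop is walking, so the candidates are collected first and only rewired once iteration has finished. Below is a self-contained sketch of that collect-then-mutate pattern, with a std::list of ints standing in for V8's intrusive use list and removal standing in for the ReplaceInput/Revisit fixup.

#include <cassert>
#include <list>
#include <vector>

int main() {
  std::list<int> uses = {1, 2, 3, 4, 5};

  // Pass 1: remember the elements that need fixing up, but do not touch the
  // container while the range-for is still iterating it.
  std::vector<int> loop_exits;
  for (int use : uses) {
    if (use % 2 == 0) loop_exits.push_back(use);
  }

  // Pass 2: now that iteration is over, it is safe to mutate the list.
  for (int loop_exit : loop_exits) {
    uses.remove(loop_exit);
  }

  assert((uses == std::list<int>{1, 3, 5}));
  return 0;
}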
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index a47941e28d..290a3b5f34 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -726,9 +726,8 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
break;
case IrOpcode::kCheckedTaggedSignedToInt32:
if (frame_state == nullptr) {
- V8_Fatal(__FILE__, __LINE__, "No frame state (zapped by #%d: %s)",
- frame_state_zapper_->id(),
- frame_state_zapper_->op()->mnemonic());
+ FATAL("No frame state (zapped by #%d: %s)", frame_state_zapper_->id(),
+ frame_state_zapper_->op()->mnemonic());
}
result = LowerCheckedTaggedSignedToInt32(node, frame_state);
break;
@@ -807,9 +806,6 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kTypeOf:
result = LowerTypeOf(node);
break;
- case IrOpcode::kClassOf:
- result = LowerClassOf(node);
- break;
case IrOpcode::kNewDoubleElements:
result = LowerNewDoubleElements(node);
break;
@@ -830,6 +826,7 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
break;
case IrOpcode::kDeadValue:
result = LowerDeadValue(node);
+ break;
case IrOpcode::kStringFromCharCode:
result = LowerStringFromCharCode(node);
break;
@@ -855,10 +852,10 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
result = LowerSeqStringCharCodeAt(node);
break;
case IrOpcode::kStringCodePointAt:
- result = LowerStringCodePointAt(node);
+ result = LowerStringCodePointAt(node, UnicodeEncodingOf(node->op()));
break;
case IrOpcode::kSeqStringCodePointAt:
- result = LowerSeqStringCharCodeAt(node);
+ result = LowerSeqStringCodePointAt(node, UnicodeEncodingOf(node->op()));
break;
case IrOpcode::kStringToLowerCaseIntl:
result = LowerStringToLowerCaseIntl(node);
@@ -866,6 +863,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kStringToUpperCaseIntl:
result = LowerStringToUpperCaseIntl(node);
break;
+ case IrOpcode::kStringSubstring:
+ result = LowerStringSubstring(node);
+ break;
case IrOpcode::kStringEqual:
result = LowerStringEqual(node);
break;
@@ -972,10 +972,10 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
}
if ((result ? 1 : 0) != node->op()->ValueOutputCount()) {
- V8_Fatal(__FILE__, __LINE__,
- "Effect control linearizer lowering of '%s':"
- " value output count does not agree.",
- node->op()->mnemonic());
+ FATAL(
+ "Effect control linearizer lowering of '%s':"
+ " value output count does not agree.",
+ node->op()->mnemonic());
}
*effect = gasm()->ExtractCurrentEffect();
@@ -1365,10 +1365,10 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
Runtime::FunctionId id = Runtime::kTryMigrateInstance;
- CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+ auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
Node* result =
- __ Call(desc, __ CEntryStubConstant(1), value,
+ __ Call(call_descriptor, __ CEntryStubConstant(1), value,
__ ExternalConstant(ExternalReference(id, isolate())),
__ Int32Constant(1), __ NoContextConstant());
Node* check = ObjectIsSmi(result);
@@ -1495,8 +1495,8 @@ Node* EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state) {
Node* check = __ Uint32LessThan(value_instance_type,
__ Uint32Constant(FIRST_NONSTRING_TYPE));
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, params.feedback(),
- check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotAString, params.feedback(), check,
+ frame_state);
return value;
}
@@ -1935,7 +1935,8 @@ Node* EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
Node* EffectControlLinearizer::LowerCheckedTaggedToFloat64(Node* node,
Node* frame_state) {
- CheckTaggedInputMode mode = CheckTaggedInputModeOf(node->op());
+ CheckTaggedInputParameters const& p =
+ CheckTaggedInputParametersOf(node->op());
Node* value = node->InputAt(0);
auto if_smi = __ MakeLabel();
@@ -1947,7 +1948,7 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToFloat64(Node* node,
// In the Smi case, just convert to int32 and then float64.
// Otherwise, check heap numberness and load the number.
Node* number = BuildCheckedHeapNumberOrOddballToFloat64(
- mode, VectorSlotPair(), value, frame_state);
+ p.mode(), p.feedback(), value, frame_state);
__ Goto(&done, number);
__ Bind(&if_smi);
@@ -2042,9 +2043,9 @@ Node* EffectControlLinearizer::LowerNumberToString(Node* node) {
Builtins::CallableFor(isolate(), Builtins::kNumberToString);
Operator::Properties properties = Operator::kEliminatable;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- return __ Call(desc, __ HeapConstant(callable.code()), argument,
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), argument,
__ NoContextConstant());
}
@@ -2379,21 +2380,9 @@ Node* EffectControlLinearizer::LowerTypeOf(Node* node) {
Callable const callable = Builtins::CallableFor(isolate(), Builtins::kTypeof);
Operator::Properties const properties = Operator::kEliminatable;
CallDescriptor::Flags const flags = CallDescriptor::kNoAllocate;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- return __ Call(desc, __ HeapConstant(callable.code()), obj,
- __ NoContextConstant());
-}
-
-Node* EffectControlLinearizer::LowerClassOf(Node* node) {
- Node* obj = node->InputAt(0);
- Callable const callable =
- Builtins::CallableFor(isolate(), Builtins::kClassOf);
- Operator::Properties const properties = Operator::kEliminatable;
- CallDescriptor::Flags const flags = CallDescriptor::kNoAllocate;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- return __ Call(desc, __ HeapConstant(callable.code()), obj,
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), obj,
__ NoContextConstant());
}
@@ -2403,9 +2392,9 @@ Node* EffectControlLinearizer::LowerToBoolean(Node* node) {
Builtins::CallableFor(isolate(), Builtins::kToBoolean);
Operator::Properties const properties = Operator::kEliminatable;
CallDescriptor::Flags const flags = CallDescriptor::kNoAllocate;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- return __ Call(desc, __ HeapConstant(callable.code()), obj,
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), obj,
__ NoContextConstant());
}
@@ -2583,10 +2572,10 @@ Node* EffectControlLinearizer::LowerNewArgumentsElements(Node* node) {
Builtins::CallableFor(isolate(), Builtins::kNewArgumentsElements);
Operator::Properties const properties = node->op()->properties();
CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- return __ Call(desc, __ HeapConstant(callable.code()), frame, length,
- __ SmiConstant(mapped_count), __ NoContextConstant());
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), frame,
+ length, __ SmiConstant(mapped_count), __ NoContextConstant());
}
Node* EffectControlLinearizer::LowerNewConsString(Node* node) {
@@ -2656,9 +2645,9 @@ Node* EffectControlLinearizer::LowerSameValue(Node* node) {
Builtins::CallableFor(isolate(), Builtins::kSameValue);
Operator::Properties properties = Operator::kEliminatable;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- return __ Call(desc, __ HeapConstant(callable.code()), lhs, rhs,
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs, rhs,
__ NoContextConstant());
}
@@ -2678,9 +2667,9 @@ Node* EffectControlLinearizer::LowerStringToNumber(Node* node) {
Builtins::CallableFor(isolate(), Builtins::kStringToNumber);
Operator::Properties properties = Operator::kEliminatable;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- return __ Call(desc, __ HeapConstant(callable.code()), string,
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), string,
__ NoContextConstant());
}
@@ -2692,44 +2681,174 @@ Node* EffectControlLinearizer::LowerStringCharAt(Node* node) {
Builtins::CallableFor(isolate(), Builtins::kStringCharAt);
Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- return __ Call(desc, __ HeapConstant(callable.code()), receiver, position,
- __ NoContextConstant());
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), receiver,
+ position, __ NoContextConstant());
}
Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
Node* receiver = node->InputAt(0);
Node* position = node->InputAt(1);
- Callable const callable =
- Builtins::CallableFor(isolate(), Builtins::kStringCharCodeAt);
- Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
- CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties,
- MachineType::TaggedSigned());
- return __ Call(desc, __ HeapConstant(callable.code()), receiver, position,
- __ NoContextConstant());
+ // We need a loop here to properly deal with indirect strings
+ // (SlicedString, ConsString and ThinString).
+ auto loop = __ MakeLoopLabel(MachineRepresentation::kTagged,
+ MachineRepresentation::kWord32);
+ auto loop_next = __ MakeLabel(MachineRepresentation::kTagged,
+ MachineRepresentation::kWord32);
+ auto loop_done = __ MakeLabel(MachineRepresentation::kWord32);
+ __ Goto(&loop, receiver, position);
+ __ Bind(&loop);
+ {
+ Node* receiver = loop.PhiAt(0);
+ Node* position = loop.PhiAt(1);
+ Node* receiver_map = __ LoadField(AccessBuilder::ForMap(), receiver);
+ Node* receiver_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), receiver_map);
+ Node* receiver_representation = __ Word32And(
+ receiver_instance_type, __ Int32Constant(kStringRepresentationMask));
+
+ // Dispatch on the current {receiver}s string representation.
+ auto if_seqstring = __ MakeLabel();
+ auto if_consstring = __ MakeLabel();
+ auto if_thinstring = __ MakeLabel();
+ auto if_externalstring = __ MakeLabel();
+ auto if_slicedstring = __ MakeLabel();
+ auto if_runtime = __ MakeDeferredLabel();
+ __ GotoIf(__ Word32Equal(receiver_representation,
+ __ Int32Constant(kSeqStringTag)),
+ &if_seqstring);
+ __ GotoIf(__ Word32Equal(receiver_representation,
+ __ Int32Constant(kConsStringTag)),
+ &if_consstring);
+ __ GotoIf(__ Word32Equal(receiver_representation,
+ __ Int32Constant(kThinStringTag)),
+ &if_thinstring);
+ __ GotoIf(__ Word32Equal(receiver_representation,
+ __ Int32Constant(kExternalStringTag)),
+ &if_externalstring);
+ __ Branch(__ Word32Equal(receiver_representation,
+ __ Int32Constant(kSlicedStringTag)),
+ &if_slicedstring, &if_runtime);
+
+ __ Bind(&if_seqstring);
+ {
+ Node* receiver_is_onebyte = __ Word32Equal(
+ __ Word32Equal(__ Word32And(receiver_instance_type,
+ __ Int32Constant(kStringEncodingMask)),
+ __ Int32Constant(kTwoByteStringTag)),
+ __ Int32Constant(0));
+ Node* result = LoadFromSeqString(receiver, position, receiver_is_onebyte);
+ __ Goto(&loop_done, result);
+ }
+
+ __ Bind(&if_thinstring);
+ {
+ Node* receiver_actual =
+ __ LoadField(AccessBuilder::ForThinStringActual(), receiver);
+ __ Goto(&loop_next, receiver_actual, position);
+ }
+
+ __ Bind(&if_consstring);
+ {
+ Node* receiver_second =
+ __ LoadField(AccessBuilder::ForConsStringSecond(), receiver);
+ __ GotoIfNot(__ WordEqual(receiver_second, __ EmptyStringConstant()),
+ &if_runtime);
+ Node* receiver_first =
+ __ LoadField(AccessBuilder::ForConsStringFirst(), receiver);
+ __ Goto(&loop_next, receiver_first, position);
+ }
+
+ __ Bind(&if_externalstring);
+ {
+      // We need to bail out to the runtime for short external strings.
+ __ GotoIf(__ Word32Equal(
+ __ Word32And(receiver_instance_type,
+ __ Int32Constant(kShortExternalStringMask)),
+ __ Int32Constant(kShortExternalStringTag)),
+ &if_runtime);
+
+ Node* receiver_data = __ LoadField(
+ AccessBuilder::ForExternalStringResourceData(), receiver);
+
+ auto if_onebyte = __ MakeLabel();
+ auto if_twobyte = __ MakeLabel();
+ __ Branch(
+ __ Word32Equal(__ Word32And(receiver_instance_type,
+ __ Int32Constant(kStringEncodingMask)),
+ __ Int32Constant(kTwoByteStringTag)),
+ &if_twobyte, &if_onebyte);
+
+ __ Bind(&if_onebyte);
+ {
+ Node* result = __ Load(MachineType::Uint8(), receiver_data,
+ ChangeInt32ToIntPtr(position));
+ __ Goto(&loop_done, result);
+ }
+
+ __ Bind(&if_twobyte);
+ {
+ Node* result = __ Load(
+ MachineType::Uint16(), receiver_data,
+ __ Word32Shl(ChangeInt32ToIntPtr(position), __ Int32Constant(1)));
+ __ Goto(&loop_done, result);
+ }
+ }
+
+ __ Bind(&if_slicedstring);
+ {
+ Node* receiver_offset =
+ __ LoadField(AccessBuilder::ForSlicedStringOffset(), receiver);
+ Node* receiver_parent =
+ __ LoadField(AccessBuilder::ForSlicedStringParent(), receiver);
+ __ Goto(&loop_next, receiver_parent,
+ __ Int32Add(position, ChangeSmiToInt32(receiver_offset)));
+ }
+
+ __ Bind(&if_runtime);
+ {
+ Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
+ Runtime::FunctionId id = Runtime::kStringCharCodeAt;
+ auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
+ graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
+ Node* result =
+ __ Call(call_descriptor, __ CEntryStubConstant(1), receiver,
+ ChangeInt32ToSmi(position),
+ __ ExternalConstant(ExternalReference(id, isolate())),
+ __ Int32Constant(2), __ NoContextConstant());
+ __ Goto(&loop_done, ChangeSmiToInt32(result));
+ }
+
+ __ Bind(&loop_next);
+ __ Goto(&loop, loop_next.PhiAt(0), loop_next.PhiAt(1));
+ }
+ __ Bind(&loop_done);
+ return loop_done.PhiAt(0);
}
-Node* EffectControlLinearizer::LowerStringCodePointAt(Node* node) {
+Node* EffectControlLinearizer::LowerStringCodePointAt(
+ Node* node, UnicodeEncoding encoding) {
Node* receiver = node->InputAt(0);
Node* position = node->InputAt(1);
- Callable const callable =
- Builtins::CallableFor(isolate(), Builtins::kStringCodePointAt);
+ Builtins::Name builtin = encoding == UnicodeEncoding::UTF16
+ ? Builtins::kStringCodePointAtUTF16
+ : Builtins::kStringCodePointAtUTF32;
+
+ Callable const callable = Builtins::CallableFor(isolate(), builtin);
Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties,
MachineType::TaggedSigned());
- return __ Call(desc, __ HeapConstant(callable.code()), receiver, position,
- __ NoContextConstant());
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), receiver,
+ position, __ NoContextConstant());
}
-Node* EffectControlLinearizer::LoadFromString(Node* receiver, Node* position,
- Node* is_one_byte) {
+Node* EffectControlLinearizer::LoadFromSeqString(Node* receiver, Node* position,
+ Node* is_one_byte) {
auto one_byte_load = __ MakeLabel();
auto done = __ MakeLabel(MachineRepresentation::kWord32);
__ GotoIf(is_one_byte, &one_byte_load);
@@ -2756,7 +2875,7 @@ Node* EffectControlLinearizer::LowerSeqStringCharCodeAt(Node* node) {
__ Word32And(instance_type, __ Int32Constant(kStringEncodingMask)),
__ Int32Constant(kOneByteStringTag));
- return LoadFromString(receiver, position, is_one_byte);
+ return LoadFromSeqString(receiver, position, is_one_byte);
}
Node* EffectControlLinearizer::LowerSeqStringCodePointAt(
@@ -2770,7 +2889,7 @@ Node* EffectControlLinearizer::LowerSeqStringCodePointAt(
__ Word32And(instance_type, __ Int32Constant(kStringEncodingMask)),
__ Int32Constant(kOneByteStringTag));
- Node* first_char_code = LoadFromString(receiver, position, is_one_byte);
+ Node* first_char_code = LoadFromSeqString(receiver, position, is_one_byte);
auto return_result = __ MakeLabel(MachineRepresentation::kWord32);
@@ -2779,16 +2898,18 @@ Node* EffectControlLinearizer::LowerSeqStringCodePointAt(
__ Word32Equal(__ Word32And(first_char_code, __ Int32Constant(0xFC00)),
__ Int32Constant(0xD800));
// Return first character code.
- __ GotoIf(first_out, &return_result, first_char_code);
+ __ GotoIfNot(first_out, &return_result, first_char_code);
// Check if position + 1 is still in range.
- Node* length = __ LoadField(AccessBuilder::ForStringLength(), receiver);
+ Node* length = ChangeSmiToInt32(
+ __ LoadField(AccessBuilder::ForStringLength(), receiver));
Node* next_position = __ Int32Add(position, __ Int32Constant(1));
Node* next_position_in_range = __ Int32LessThan(next_position, length);
- __ GotoIf(next_position_in_range, &return_result, first_char_code);
+ __ GotoIfNot(next_position_in_range, &return_result, first_char_code);
// Load second character code.
- Node* second_char_code = LoadFromString(receiver, next_position, is_one_byte);
- // Check if first character code is outside of interval [0xD800, 0xDBFF].
+ Node* second_char_code =
+ LoadFromSeqString(receiver, next_position, is_one_byte);
+ // Check if second character code is outside of interval [0xDC00, 0xDFFF].
Node* second_out =
__ Word32Equal(__ Word32And(second_char_code, __ Int32Constant(0xFC00)),
__ Int32Constant(0xDC00));
@@ -2862,12 +2983,12 @@ Node* EffectControlLinearizer::LowerStringFromCharCode(Node* node) {
{
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
Runtime::FunctionId id = Runtime::kStringCharFromCode;
- CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+ auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
- Node* vtrue1 =
- __ Call(desc, __ CEntryStubConstant(1), ChangeInt32ToSmi(code),
- __ ExternalConstant(ExternalReference(id, isolate())),
- __ Int32Constant(1), __ NoContextConstant());
+ Node* vtrue1 = __ Call(
+ call_descriptor, __ CEntryStubConstant(1), ChangeInt32ToSmi(code),
+ __ ExternalConstant(ExternalReference(id, isolate())),
+ __ Int32Constant(1), __ NoContextConstant());
__ Goto(&done, vtrue1);
}
__ Bind(&done);
@@ -2883,9 +3004,9 @@ Node* EffectControlLinearizer::LowerStringToLowerCaseIntl(Node* node) {
Builtins::CallableFor(isolate(), Builtins::kStringToLowerCaseIntl);
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- return __ Call(desc, __ HeapConstant(callable.code()), receiver,
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), receiver,
__ NoContextConstant());
}
@@ -2893,9 +3014,9 @@ Node* EffectControlLinearizer::LowerStringToUpperCaseIntl(Node* node) {
Node* receiver = node->InputAt(0);
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
Runtime::FunctionId id = Runtime::kStringToUpperCaseIntl;
- CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+ auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
- return __ Call(desc, __ CEntryStubConstant(1), receiver,
+ return __ Call(call_descriptor, __ CEntryStubConstant(1), receiver,
__ ExternalConstant(ExternalReference(id, isolate())),
__ Int32Constant(1), __ NoContextConstant());
}
@@ -3048,10 +3169,10 @@ Node* EffectControlLinearizer::LowerStringIndexOf(Node* node) {
Builtins::CallableFor(isolate(), Builtins::kStringIndexOf);
Operator::Properties properties = Operator::kEliminatable;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- return __ Call(desc, __ HeapConstant(callable.code()), subject, search_string,
- position, __ NoContextConstant());
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), subject,
+ search_string, position, __ NoContextConstant());
}
Node* EffectControlLinearizer::LowerStringLength(Node* node) {
@@ -3067,12 +3188,27 @@ Node* EffectControlLinearizer::LowerStringComparison(Callable const& callable,
Operator::Properties properties = Operator::kEliminatable;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- return __ Call(desc, __ HeapConstant(callable.code()), lhs, rhs,
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs, rhs,
__ NoContextConstant());
}
+Node* EffectControlLinearizer::LowerStringSubstring(Node* node) {
+ Node* receiver = node->InputAt(0);
+ Node* start = ChangeInt32ToIntPtr(node->InputAt(1));
+ Node* end = ChangeInt32ToIntPtr(node->InputAt(2));
+
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kStringSubstring);
+ Operator::Properties properties = Operator::kEliminatable;
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), receiver,
+ start, end, __ NoContextConstant());
+}
+
Node* EffectControlLinearizer::LowerStringEqual(Node* node) {
return LowerStringComparison(
Builtins::CallableFor(isolate(), Builtins::kStringEqual), node);
@@ -3176,10 +3312,10 @@ void EffectControlLinearizer::LowerCheckEqualsInternalizedString(
builder.AddParam(MachineType::AnyTagged());
Node* try_internalize_string_function = __ ExternalConstant(
ExternalReference::try_internalize_string_function(isolate()));
- CallDescriptor const* const desc =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(graph()->zone(), builder.Build());
- Node* val_internalized =
- __ Call(common()->Call(desc), try_internalize_string_function, val);
+ Node* val_internalized = __ Call(common()->Call(call_descriptor),
+ try_internalize_string_function, val);
// Now see if the results match.
__ DeoptimizeIfNot(DeoptimizeReason::kWrongName, VectorSlotPair(),
@@ -3218,10 +3354,14 @@ Node* EffectControlLinearizer::AllocateHeapNumberWithValue(Node* value) {
}
Node* EffectControlLinearizer::ChangeInt32ToSmi(Node* value) {
+ return __ WordShl(ChangeInt32ToIntPtr(value), SmiShiftBitsConstant());
+}
+
+Node* EffectControlLinearizer::ChangeInt32ToIntPtr(Node* value) {
if (machine()->Is64()) {
value = __ ChangeInt32ToInt64(value);
}
- return __ WordShl(value, SmiShiftBitsConstant());
+ return value;
}
Node* EffectControlLinearizer::ChangeIntPtrToInt32(Node* value) {
@@ -3350,10 +3490,10 @@ Node* EffectControlLinearizer::LowerEnsureWritableFastElements(Node* node) {
Callable callable =
Builtins::CallableFor(isolate(), Builtins::kCopyFastSmiOrObjectElements);
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- Node* result = __ Call(desc, __ HeapConstant(callable.code()), object,
- __ NoContextConstant());
+ Node* result = __ Call(call_descriptor, __ HeapConstant(callable.code()),
+ object, __ NoContextConstant());
__ Goto(&done, result);
__ Bind(&done);
@@ -3386,11 +3526,12 @@ Node* EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
: Builtins::CallableFor(isolate(),
Builtins::kGrowFastSmiOrObjectElements);
CallDescriptor::Flags call_flags = CallDescriptor::kNoFlags;
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, call_flags,
properties);
- Node* new_elements = __ Call(desc, __ HeapConstant(callable.code()), object,
- ChangeInt32ToSmi(index), __ NoContextConstant());
+ Node* new_elements =
+ __ Call(call_descriptor, __ HeapConstant(callable.code()), object,
+ ChangeInt32ToSmi(index), __ NoContextConstant());
// Ensure that we were able to grow the {elements}.
__ DeoptimizeIf(DeoptimizeReason::kCouldNotGrowElements, params.feedback(),
@@ -3429,9 +3570,9 @@ void EffectControlLinearizer::LowerTransitionElementsKind(Node* node) {
// Instance migration, call out to the runtime for {object}.
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
Runtime::FunctionId id = Runtime::kTransitionElementsKind;
- CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+ auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
- __ Call(desc, __ CEntryStubConstant(1), object, target_map,
+ __ Call(call_descriptor, __ CEntryStubConstant(1), object, target_map,
__ ExternalConstant(ExternalReference(id, isolate())),
__ Int32Constant(2), __ NoContextConstant());
break;
@@ -3557,8 +3698,9 @@ Node* EffectControlLinearizer::LowerLoadTypedElement(Node* node) {
// Compute the effective storage pointer, handling the case where the
// {external} pointer is the effective storage pointer (i.e. the {base}
// is Smi zero).
- Node* storage = NumberMatcher(base).Is(0) ? external : __ UnsafePointerAdd(
- base, external);
+ Node* storage = IntPtrMatcher(base).Is(0)
+ ? external
+ : __ UnsafePointerAdd(base, external);
// Perform the actual typed element access.
return __ LoadElement(AccessBuilder::ForTypedArrayElement(array_type, true),
@@ -3580,8 +3722,9 @@ void EffectControlLinearizer::LowerStoreTypedElement(Node* node) {
// Compute the effective storage pointer, handling the case where the
// {external} pointer is the effective storage pointer (i.e. the {base}
// is Smi zero).
- Node* storage = NumberMatcher(base).Is(0) ? external : __ UnsafePointerAdd(
- base, external);
+ Node* storage = IntPtrMatcher(base).Is(0)
+ ? external
+ : __ UnsafePointerAdd(base, external);
// Perform the actual typed element access.
__ StoreElement(AccessBuilder::ForTypedArrayElement(array_type, true),
@@ -3604,9 +3747,9 @@ void EffectControlLinearizer::TransitionElementsTo(Node* node, Node* array,
// Instance migration, call out to the runtime for {array}.
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
Runtime::FunctionId id = Runtime::kTransitionElementsKind;
- CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+ auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
- __ Call(desc, __ CEntryStubConstant(1), array, target_map,
+ __ Call(call_descriptor, __ CEntryStubConstant(1), array, target_map,
__ ExternalConstant(ExternalReference(id, isolate())),
__ Int32Constant(2), __ NoContextConstant());
}
@@ -3951,9 +4094,9 @@ void EffectControlLinearizer::LowerRuntimeAbort(Node* node) {
AbortReason reason = AbortReasonOf(node->op());
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
Runtime::FunctionId id = Runtime::kAbort;
- CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+ auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
- __ Call(desc, __ CEntryStubConstant(1),
+ __ Call(call_descriptor, __ CEntryStubConstant(1),
jsgraph()->SmiConstant(static_cast<int>(reason)),
__ ExternalConstant(ExternalReference(id, isolate())),
__ Int32Constant(1), __ NoContextConstant());
@@ -3988,13 +4131,13 @@ Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) {
Operator::Properties properties = Operator::kEliminatable;
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags,
properties);
Node* native_context = __ LoadField(
AccessBuilder::ForJSGlobalProxyNativeContext(), global_proxy);
- Node* result = __ Call(desc, __ HeapConstant(callable.code()), value,
- native_context);
+ Node* result = __ Call(call_descriptor, __ HeapConstant(callable.code()),
+ value, native_context);
__ Goto(&done_convert, result);
__ Bind(&done_convert);
@@ -4024,13 +4167,13 @@ Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) {
Operator::Properties properties = Operator::kEliminatable;
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags,
properties);
Node* native_context = __ LoadField(
AccessBuilder::ForJSGlobalProxyNativeContext(), global_proxy);
- Node* result = __ Call(desc, __ HeapConstant(callable.code()), value,
- native_context);
+ Node* result = __ Call(call_descriptor, __ HeapConstant(callable.code()),
+ value, native_context);
__ Goto(&done_convert, result);
// Replace the {value} with the {global_proxy}.
@@ -4381,11 +4524,11 @@ Node* EffectControlLinearizer::LowerFindOrderedHashMapEntry(Node* node) {
Builtins::CallableFor(isolate(), Builtins::kFindOrderedHashMapEntry);
Operator::Properties const properties = node->op()->properties();
CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags,
properties);
- return __ Call(desc, __ HeapConstant(callable.code()), table, key,
- __ NoContextConstant());
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), table,
+ key, __ NoContextConstant());
}
}
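The new LowerStringCharCodeAt body earlier in this file replaces an unconditional builtin call with an inlined loop that peels indirect string representations (thin strings, flat cons strings, sliced strings) until it reaches characters it can load directly, and bails out to the runtime for the remaining cases. A rough standalone model of the control flow the lowered graph implements follows; the Rep enum and Str struct are illustrative stand-ins, not V8's actual string layout.

#include <cassert>
#include <cstdint>
#include <string>

enum class Rep { kSeq, kExternal, kCons, kThin, kSliced };

struct Str {
  Rep rep;
  std::string data;             // characters for kSeq / kExternal
  const Str* target = nullptr;  // cons first / thin actual / sliced parent
  const Str* second = nullptr;  // cons second part (empty means "flat")
  int offset = 0;               // slice offset for kSliced
};

uint16_t CharCodeAt(const Str* receiver, int position) {
  // Mirrors the loop / loop_next / loop_done structure of the lowering: keep
  // unwrapping indirect strings until a direct load is possible.
  while (true) {
    switch (receiver->rep) {
      case Rep::kSeq:
      case Rep::kExternal:
        return static_cast<uint16_t>(receiver->data[position]);
      case Rep::kThin:
        receiver = receiver->target;   // follow the actual string
        break;
      case Rep::kCons:
        // Only flat cons strings (empty second part) are handled inline;
        // the real lowering calls Runtime::kStringCharCodeAt otherwise.
        assert(receiver->second->data.empty());
        receiver = receiver->target;
        break;
      case Rep::kSliced:
        position += receiver->offset;  // add the slice offset
        receiver = receiver->target;   // continue with the parent
        break;
    }
  }
}

int main() {
  Str parent{Rep::kSeq, "hello world"};
  Str sliced{Rep::kSliced, "", &parent, nullptr, 6};
  assert(CharCodeAt(&sliced, 0) == 'w');
  return 0;
}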
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index 47b1586d6d..21425d3ab0 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -119,13 +119,14 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerStringCharAt(Node* node);
Node* LowerStringCharCodeAt(Node* node);
Node* LowerSeqStringCharCodeAt(Node* node);
- Node* LowerStringCodePointAt(Node* node);
+ Node* LowerStringCodePointAt(Node* node, UnicodeEncoding encoding);
Node* LowerSeqStringCodePointAt(Node* node, UnicodeEncoding encoding);
Node* LowerStringToLowerCaseIntl(Node* node);
Node* LowerStringToUpperCaseIntl(Node* node);
Node* LowerStringFromCharCode(Node* node);
Node* LowerStringFromCodePoint(Node* node);
Node* LowerStringIndexOf(Node* node);
+ Node* LowerStringSubstring(Node* node);
Node* LowerStringLength(Node* node);
Node* LowerStringEqual(Node* node);
Node* LowerStringLessThan(Node* node);
@@ -136,7 +137,6 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
void LowerCheckEqualsInternalizedString(Node* node, Node* frame_state);
void LowerCheckEqualsSymbol(Node* node, Node* frame_state);
Node* LowerTypeOf(Node* node);
- Node* LowerClassOf(Node* node);
Node* LowerToBoolean(Node* node);
Node* LowerPlainPrimitiveToNumber(Node* node);
Node* LowerPlainPrimitiveToWord32(Node* node);
@@ -176,13 +176,14 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* IsElementsKindGreaterThan(Node* kind, ElementsKind reference_kind);
Node* ChangeInt32ToSmi(Node* value);
+ Node* ChangeInt32ToIntPtr(Node* value);
Node* ChangeIntPtrToInt32(Node* value);
Node* ChangeUint32ToUintPtr(Node* value);
Node* ChangeUint32ToSmi(Node* value);
Node* ChangeSmiToIntPtr(Node* value);
Node* ChangeSmiToInt32(Node* value);
Node* ObjectIsSmi(Node* value);
- Node* LoadFromString(Node* receiver, Node* position, Node* is_one_byte);
+ Node* LoadFromSeqString(Node* receiver, Node* position, Node* is_one_byte);
Node* SmiMaxValueConstant();
Node* SmiShiftBitsConstant();
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
index 16a9d78faf..66715b9a94 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.cc
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -218,9 +218,8 @@ void EscapeAnalysisReducer::VerifyReplacement() const {
if (const VirtualObject* vobject =
analysis_result().GetVirtualObject(node)) {
if (!vobject->HasEscaped()) {
- V8_Fatal(__FILE__, __LINE__,
- "Escape analysis failed to remove node %s#%d\n",
- node->op()->mnemonic(), node->id());
+ FATAL("Escape analysis failed to remove node %s#%d\n",
+ node->op()->mnemonic(), node->id());
}
}
}
diff --git a/deps/v8/src/compiler/frame-states.cc b/deps/v8/src/compiler/frame-states.cc
index 0a0e3ec868..7d55cc29d3 100644
--- a/deps/v8/src/compiler/frame-states.cc
+++ b/deps/v8/src/compiler/frame-states.cc
@@ -79,12 +79,15 @@ std::ostream& operator<<(std::ostream& os, FrameStateInfo const& info) {
}
namespace {
+
Node* CreateBuiltinContinuationFrameStateCommon(
- JSGraph* js_graph, Builtins::Name name, Node* context, Node** parameters,
- int parameter_count, Node* outer_frame_state, Handle<JSFunction> function) {
- Isolate* isolate = js_graph->isolate();
- Graph* graph = js_graph->graph();
- CommonOperatorBuilder* common = js_graph->common();
+ JSGraph* jsgraph, FrameStateType frame_type, Builtins::Name name,
+ Node* closure, Node* context, Node** parameters, int parameter_count,
+ Node* outer_frame_state,
+ Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>()) {
+ Isolate* const isolate = jsgraph->isolate();
+ Graph* const graph = jsgraph->graph();
+ CommonOperatorBuilder* const common = jsgraph->common();
BailoutId bailout_id = Builtins::GetContinuationBailoutId(name);
Callable callable = Builtins::CallableFor(isolate, name);
@@ -93,35 +96,26 @@ Node* CreateBuiltinContinuationFrameStateCommon(
common->StateValues(parameter_count, SparseInputMask::Dense());
Node* params_node = graph->NewNode(op_param, parameter_count, parameters);
- FrameStateType frame_type =
- function.is_null() ? FrameStateType::kBuiltinContinuation
- : FrameStateType::kJavaScriptBuiltinContinuation;
const FrameStateFunctionInfo* state_info =
- common->CreateFrameStateFunctionInfo(
- frame_type, parameter_count, 0,
- function.is_null() ? Handle<SharedFunctionInfo>()
- : Handle<SharedFunctionInfo>(function->shared()));
+ common->CreateFrameStateFunctionInfo(frame_type, parameter_count, 0,
+ shared);
const Operator* op = common->FrameState(
bailout_id, OutputFrameStateCombine::Ignore(), state_info);
- Node* function_node = function.is_null() ? js_graph->UndefinedConstant()
- : js_graph->HeapConstant(function);
-
Node* frame_state = graph->NewNode(
- op, params_node, js_graph->EmptyStateValues(),
- js_graph->EmptyStateValues(), context, function_node, outer_frame_state);
+ op, params_node, jsgraph->EmptyStateValues(), jsgraph->EmptyStateValues(),
+ context, closure, outer_frame_state);
return frame_state;
}
+
} // namespace
-Node* CreateStubBuiltinContinuationFrameState(JSGraph* js_graph,
- Builtins::Name name,
- Node* context, Node** parameters,
- int parameter_count,
- Node* outer_frame_state,
- ContinuationFrameStateMode mode) {
- Isolate* isolate = js_graph->isolate();
+Node* CreateStubBuiltinContinuationFrameState(
+ JSGraph* jsgraph, Builtins::Name name, Node* context,
+ Node* const* parameters, int parameter_count, Node* outer_frame_state,
+ ContinuationFrameStateMode mode) {
+ Isolate* isolate = jsgraph->isolate();
Callable callable = Builtins::CallableFor(isolate, name);
CallInterfaceDescriptor descriptor = callable.descriptor();
@@ -142,18 +136,18 @@ Node* CreateStubBuiltinContinuationFrameState(JSGraph* js_graph,
}
return CreateBuiltinContinuationFrameStateCommon(
- js_graph, name, context, actual_parameters.data(),
- static_cast<int>(actual_parameters.size()), outer_frame_state,
- Handle<JSFunction>());
+ jsgraph, FrameStateType::kBuiltinContinuation, name,
+ jsgraph->UndefinedConstant(), context, actual_parameters.data(),
+ static_cast<int>(actual_parameters.size()), outer_frame_state);
}
Node* CreateJavaScriptBuiltinContinuationFrameState(
- JSGraph* js_graph, Handle<JSFunction> function, Builtins::Name name,
- Node* target, Node* context, Node** stack_parameters,
+ JSGraph* jsgraph, Handle<SharedFunctionInfo> shared, Builtins::Name name,
+ Node* target, Node* context, Node* const* stack_parameters,
int stack_parameter_count, Node* outer_frame_state,
ContinuationFrameStateMode mode) {
- Isolate* isolate = js_graph->isolate();
- Callable callable = Builtins::CallableFor(isolate, name);
+ Isolate* const isolate = jsgraph->isolate();
+ Callable const callable = Builtins::CallableFor(isolate, name);
  // Lazy deopt points where the frame state is associated with a call get an
// additional parameter for the return result from the call that's added by
@@ -165,8 +159,8 @@ Node* CreateJavaScriptBuiltinContinuationFrameState(
(mode == ContinuationFrameStateMode::EAGER ? 0 : 1));
Node* argc =
- js_graph->Constant(stack_parameter_count -
- (mode == ContinuationFrameStateMode::EAGER ? 1 : 0));
+ jsgraph->Constant(stack_parameter_count -
+ (mode == ContinuationFrameStateMode::EAGER ? 1 : 0));
// Stack parameters first. They must be first because the receiver is expected
// to be the second value in the translation when creating stack crawls
@@ -179,12 +173,13 @@ Node* CreateJavaScriptBuiltinContinuationFrameState(
  // Register parameters follow stack parameters. The context will be added by
  // the instruction selector during FrameState translation.
actual_parameters.push_back(target);
- actual_parameters.push_back(js_graph->UndefinedConstant());
+ actual_parameters.push_back(jsgraph->UndefinedConstant());
actual_parameters.push_back(argc);
return CreateBuiltinContinuationFrameStateCommon(
- js_graph, name, context, &actual_parameters[0],
- static_cast<int>(actual_parameters.size()), outer_frame_state, function);
+ jsgraph, FrameStateType::kJavaScriptBuiltinContinuation, name, target,
+ context, &actual_parameters[0],
+ static_cast<int>(actual_parameters.size()), outer_frame_state, shared);
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/frame-states.h b/deps/v8/src/compiler/frame-states.h
index ac00f8c129..fb3d42ff41 100644
--- a/deps/v8/src/compiler/frame-states.h
+++ b/deps/v8/src/compiler/frame-states.h
@@ -145,16 +145,14 @@ static const int kFrameStateInputCount = kFrameStateOuterStateInput + 1;
enum class ContinuationFrameStateMode { EAGER, LAZY };
-Node* CreateStubBuiltinContinuationFrameState(JSGraph* graph,
- Builtins::Name name,
- Node* context, Node** parameters,
- int parameter_count,
- Node* outer_frame_state,
- ContinuationFrameStateMode mode);
+Node* CreateStubBuiltinContinuationFrameState(
+ JSGraph* graph, Builtins::Name name, Node* context, Node* const* parameters,
+ int parameter_count, Node* outer_frame_state,
+ ContinuationFrameStateMode mode);
Node* CreateJavaScriptBuiltinContinuationFrameState(
- JSGraph* graph, Handle<JSFunction> function, Builtins::Name name,
- Node* target, Node* context, Node** stack_parameters,
+ JSGraph* graph, Handle<SharedFunctionInfo> shared, Builtins::Name name,
+ Node* target, Node* context, Node* const* stack_parameters,
int stack_parameter_count, Node* outer_frame_state,
ContinuationFrameStateMode mode);
diff --git a/deps/v8/src/compiler/functional-list.h b/deps/v8/src/compiler/functional-list.h
new file mode 100644
index 0000000000..2345f1d360
--- /dev/null
+++ b/deps/v8/src/compiler/functional-list.h
@@ -0,0 +1,122 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_FUNCTIONAL_LIST_H_
+#define V8_COMPILER_FUNCTIONAL_LIST_H_
+
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// A generic stack implemented as a purely functional singly-linked list, which
+// results in an O(1) copy operation. It is the equivalent of functional lists
+// in ML-like languages, with the only difference that it also caches the length
+// of the list in each node.
+// TODO(tebbi): Use this implementation also for RedundancyElimination.
+template <class A>
+class FunctionalList {
+ private:
+ struct Cons : ZoneObject {
+ Cons(A top, Cons* rest)
+ : top(std::move(top)), rest(rest), size(1 + (rest ? rest->size : 0)) {}
+ A const top;
+ Cons* const rest;
+ size_t const size;
+ };
+
+ public:
+ FunctionalList() : elements_(nullptr) {}
+
+ bool operator==(const FunctionalList<A>& other) const {
+ if (Size() != other.Size()) return false;
+ iterator it = begin();
+ iterator other_it = other.begin();
+ while (true) {
+ if (it == other_it) return true;
+ if (*it != *other_it) return false;
+ ++it;
+ ++other_it;
+ }
+ }
+ bool operator!=(const FunctionalList<A>& other) const {
+ return !(*this == other);
+ }
+
+ const A& Front() const {
+ DCHECK_GT(Size(), 0);
+ return elements_->top;
+ }
+
+ FunctionalList Rest() const {
+ FunctionalList result = *this;
+ result.DropFront();
+ return result;
+ }
+
+ void DropFront() {
+ CHECK_GT(Size(), 0);
+ elements_ = elements_->rest;
+ }
+
+ void PushFront(A a, Zone* zone) {
+ elements_ = new (zone) Cons(std::move(a), elements_);
+ }
+
+ // If {hint} happens to be exactly what we want to allocate, avoid allocation
+ // by reusing {hint}.
+ void PushFront(A a, Zone* zone, FunctionalList hint) {
+ if (hint.Size() == Size() + 1 && hint.Front() == a &&
+ hint.Rest() == *this) {
+ *this = hint;
+ } else {
+ PushFront(a, zone);
+ }
+ }
+
+ // Drop elements until the current stack is equal to the tail shared with
+ // {other}. The shared tail must not only be equal, but also refer to the
+ // same memory.
+ void ResetToCommonAncestor(FunctionalList other) {
+ while (other.Size() > Size()) other.DropFront();
+ while (other.Size() < Size()) DropFront();
+ while (elements_ != other.elements_) {
+ DropFront();
+ other.DropFront();
+ }
+ }
+
+ size_t Size() const { return elements_ ? elements_->size : 0; }
+
+ class iterator {
+ public:
+ explicit iterator(Cons* cur) : current_(cur) {}
+
+ const A& operator*() const { return current_->top; }
+ iterator& operator++() {
+ current_ = current_->rest;
+ return *this;
+ }
+ bool operator==(const iterator& other) const {
+ return this->current_ == other.current_;
+ }
+ bool operator!=(const iterator& other) const { return !(*this == other); }
+
+ private:
+ Cons* current_;
+ };
+
+ iterator begin() const { return iterator(elements_); }
+ iterator end() const { return iterator(nullptr); }
+
+ private:
+ Cons* elements_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_FUNCTIONAL_LIST_H_
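The FunctionalList added above gets its O(1) copy from structural sharing: every copy points at the same zone-allocated tail, and ResetToCommonAncestor rewinds two lists to the longest tail they physically share. A standalone analogue that swaps zone allocation for std::shared_ptr, purely so the example runs outside V8, behaves the same way:

#include <cassert>
#include <cstddef>
#include <memory>
#include <utility>

// Persistent singly-linked list with shared tails; shared_ptr stands in for
// V8's Zone allocation in this sketch.
template <class A>
class PersistentList {
 public:
  void PushFront(A value) {
    head_ = std::shared_ptr<Cons>(
        new Cons{std::move(value), head_, 1 + (head_ ? head_->size : 0)});
  }
  void DropFront() { head_ = head_->rest; }
  const A& Front() const { return head_->value; }
  size_t Size() const { return head_ ? head_->size : 0; }

  // Rewind both lists to the longest physically shared tail, mirroring
  // FunctionalList::ResetToCommonAncestor.
  void ResetToCommonAncestor(PersistentList other) {
    while (other.Size() > Size()) other.DropFront();
    while (other.Size() < Size()) DropFront();
    while (head_ != other.head_) {
      DropFront();
      other.DropFront();
    }
  }

 private:
  struct Cons {
    A value;
    std::shared_ptr<Cons> rest;
    size_t size;
  };
  std::shared_ptr<Cons> head_;
};

int main() {
  PersistentList<int> a;
  a.PushFront(1);
  a.PushFront(2);
  PersistentList<int> b = a;   // O(1) copy: b shares the [2, 1] tail with a
  b.PushFront(3);              // b is now [3, 2, 1]
  a.PushFront(4);              // a is now [4, 2, 1]
  a.ResetToCommonAncestor(b);  // rewinds a to the shared tail [2, 1]
  assert(a.Size() == 2 && a.Front() == 2);
  return 0;
}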
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index a0b2e0ff0a..676860fdcd 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -244,10 +244,10 @@ Operator const* GraphAssembler::ToNumberOperator() {
Callable callable =
Builtins::CallableFor(jsgraph()->isolate(), Builtins::kToNumber);
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
jsgraph()->isolate(), graph()->zone(), callable.descriptor(), 0, flags,
Operator::kEliminatable);
- to_number_operator_.set(common()->Call(desc));
+ to_number_operator_.set(common()->Call(call_descriptor));
}
return to_number_operator_.get();
}
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index 9ae74d0df5..f3dd4e70f9 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -220,7 +220,7 @@ class GraphAssembler {
Node* DeoptimizeIfNot(DeoptimizeReason reason, VectorSlotPair const& feedback,
Node* condition, Node* frame_state);
template <typename... Args>
- Node* Call(const CallDescriptor* desc, Args... args);
+ Node* Call(const CallDescriptor* call_descriptor, Args... args);
template <typename... Args>
Node* Call(const Operator* op, Args... args);
@@ -406,8 +406,9 @@ void GraphAssembler::GotoIfNot(Node* condition,
}
template <typename... Args>
-Node* GraphAssembler::Call(const CallDescriptor* desc, Args... args) {
- const Operator* op = common()->Call(desc);
+Node* GraphAssembler::Call(const CallDescriptor* call_descriptor,
+ Args... args) {
+ const Operator* op = common()->Call(call_descriptor);
return Call(op, args...);
}
diff --git a/deps/v8/src/compiler/graph-reducer.h b/deps/v8/src/compiler/graph-reducer.h
index 517f71e955..adb97ddf4d 100644
--- a/deps/v8/src/compiler/graph-reducer.h
+++ b/deps/v8/src/compiler/graph-reducer.h
@@ -23,6 +23,8 @@ class Node;
// out-of-line data associated with each node.
typedef uint32_t NodeId;
+// Possible outcomes for decisions.
+enum class Decision : uint8_t { kUnknown, kTrue, kFalse };
// Represents the result of trying to reduce a node in the graph.
class Reduction final {
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index 47ded6a30c..91df483622 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -372,6 +372,17 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ j(not_equal, &binop); \
} while (false)
+#define ASSEMBLE_MOVX(mov_instr) \
+ do { \
+ if (instr->addressing_mode() != kMode_None) { \
+ __ mov_instr(i.OutputRegister(), i.MemoryOperand()); \
+ } else if (instr->InputAt(0)->IsRegister()) { \
+ __ mov_instr(i.OutputRegister(), i.InputRegister(0)); \
+ } else { \
+ __ mov_instr(i.OutputRegister(), i.InputOperand(0)); \
+ } \
+ } while (0)
+
void CodeGenerator::AssembleDeconstructFrame() {
__ mov(esp, ebp);
__ pop(ebp);
@@ -484,32 +495,52 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
first_unused_stack_slot);
}
+// Check that {kJavaScriptCallCodeStartRegister} is correct.
+void CodeGenerator::AssembleCodeStartRegisterCheck() {
+ __ push(eax); // Push eax so we can use it as a scratch register.
+ __ ComputeCodeStartAddress(eax);
+ __ cmp(eax, kJavaScriptCallCodeStartRegister);
+ __ Assert(equal, AbortReason::kWrongFunctionCodeStart);
+ __ pop(eax); // Restore eax.
+}
+
// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
-// 1. load the address of the current instruction;
-// 2. read from memory the word that contains that bit, which can be found in
+// 1. read from memory the word that contains that bit, which can be found in
// the flags in the referenced {CodeDataContainer} object;
-// 3. test kMarkedForDeoptimizationBit in those flags; and
-// 4. if it is not zero then it jumps to the builtin.
+// 2. test kMarkedForDeoptimizationBit in those flags; and
+// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
- Label current;
- __ call(&current);
- int pc = __ pc_offset();
- __ bind(&current);
- // In order to get the address of the current instruction, we first need
- // to use a call and then use a pop, thus pushing the return address to
- // the stack and then popping it into the register.
- __ pop(ecx);
- int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc);
- __ mov(ecx, Operand(ecx, offset));
- __ test(FieldOperand(ecx, CodeDataContainer::kKindSpecificFlagsOffset),
+ int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+ __ mov(ebx, Operand(kJavaScriptCallCodeStartRegister, offset));
+ __ test(FieldOperand(ebx, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
Handle<Code> code = isolate()->builtins()->builtin_handle(
Builtins::kCompileLazyDeoptimizedCode);
__ j(not_zero, code, RelocInfo::CODE_TARGET);
}
+void CodeGenerator::GenerateSpeculationPoison() {
+ __ push(eax); // Push eax so we can use it as a scratch register.
+
+ // Set a mask which has all bits set in the normal case, but has all
+ // bits cleared if we are speculatively executing the wrong PC.
+ __ ComputeCodeStartAddress(eax);
+ __ mov(kSpeculationPoisonRegister, Immediate(0));
+ __ cmp(kJavaScriptCallCodeStartRegister, eax);
+ __ mov(eax, Immediate(-1));
+ __ cmov(equal, kSpeculationPoisonRegister, eax);
+
+ __ pop(eax); // Restore eax.
+}
+
+void CodeGenerator::AssembleRegisterArgumentPoisoning() {
+ __ and_(kJSFunctionRegister, kSpeculationPoisonRegister);
+ __ and_(kContextRegister, kSpeculationPoisonRegister);
+ __ and_(esp, kSpeculationPoisonRegister);
+}
+
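GenerateSpeculationPoison computes the mask with a compare and a conditional move rather than a branch, so the mask is valid even while the CPU is speculating past a mispredicted indirect jump; AssembleRegisterArgumentPoisoning then ANDs the function, context and stack-pointer registers with it. A scalar sketch of that idea follows (illustrative only, not the emitted ia32 code):

#include <cstdint>

// All ones when the dynamically computed code start matches the value in
// kJavaScriptCallCodeStartRegister, all zeros otherwise; -(x == y) is
// branch-free, like the cmp/cmov pair emitted above.
uintptr_t ComputePoisonMask(uintptr_t actual_code_start,
                            uintptr_t expected_code_start) {
  return -static_cast<uintptr_t>(actual_code_start == expected_code_start);
}

// Under misspeculation the mask is zero, so a poisoned pointer collapses to
// null and cannot feed a dependent, cache-observable load.
uintptr_t Poison(uintptr_t pointer, uintptr_t mask) { return pointer & mask; }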
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -524,7 +555,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
__ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(reg);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineCall(reg);
+ } else {
+ __ call(reg);
+ }
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -537,11 +572,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (info()->IsWasm()) {
__ wasm_call(wasm_code, RelocInfo::WASM_CALL);
} else {
- __ call(wasm_code, RelocInfo::JS_TO_WASM_CALL);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineCall(wasm_code, RelocInfo::JS_TO_WASM_CALL);
+ } else {
+ __ call(wasm_code, RelocInfo::JS_TO_WASM_CALL);
+ }
}
} else {
Register reg = i.InputRegister(0);
- __ call(reg);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineCall(reg);
+ } else {
+ __ call(reg);
+ }
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -559,7 +602,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
__ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(reg);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineJump(reg);
+ } else {
+ __ jmp(reg);
+ }
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -576,7 +623,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
} else {
Register reg = i.InputRegister(0);
- __ jmp(reg);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineJump(reg);
+ } else {
+ __ jmp(reg);
+ }
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -585,7 +636,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTailCallAddress: {
CHECK(!HasImmediateInput(instr, 0));
Register reg = i.InputRegister(0);
- __ jmp(reg);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineJump(reg);
+ } else {
+ __ jmp(reg);
+ }
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
break;
@@ -597,6 +652,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
__ Assert(equal, AbortReason::kWrongFunctionContext);
}
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ mov(ecx, FieldOperand(func, JSFunction::kCodeOffset));
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(ecx);
@@ -1403,10 +1459,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ subsd(i.InputDoubleRegister(0), kScratchDoubleReg);
break;
case kIA32Movsxbl:
- __ movsx_b(i.OutputRegister(), i.MemoryOperand());
+ ASSEMBLE_MOVX(movsx_b);
break;
case kIA32Movzxbl:
- __ movzx_b(i.OutputRegister(), i.MemoryOperand());
+ ASSEMBLE_MOVX(movzx_b);
break;
case kIA32Movb: {
size_t index = 0;
@@ -1419,10 +1475,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32Movsxwl:
- __ movsx_w(i.OutputRegister(), i.MemoryOperand());
+ ASSEMBLE_MOVX(movsx_w);
break;
case kIA32Movzxwl:
- __ movzx_w(i.OutputRegister(), i.MemoryOperand());
+ ASSEMBLE_MOVX(movzx_w);
break;
case kIA32Movw: {
size_t index = 0;
@@ -2371,6 +2427,126 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+
+#define I8x16_SPLAT(reg, scratch, v) \
+ __ Move(reg, static_cast<uint32_t>(v)); \
+ __ Pxor(scratch, scratch); \
+ __ Pshufb(reg, scratch)
+
+ case kSSEI8x16Shl: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ XMMRegister src = i.InputSimd128Register(0);
+ int8_t shift = i.InputInt8(1) & 0x7;
+ XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+
+ // src = AAaa ... AAaa
+ // tmp = 0F0F ... 0F0F (shift=4)
+ I8x16_SPLAT(tmp, kScratchDoubleReg, 0xFFU >> shift);
+
+ // src = src & tmp
+ // => 0A0a ... 0A0a
+ __ pand(src, tmp);
+
+ // src = src << shift
+ // => A0a0 ... A0a0 (shift=4)
+ __ pslld(src, shift);
+ break;
+ }
+ case kAVXI8x16Shl: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ int8_t shift = i.InputInt8(1) & 0x7;
+ XMMRegister tmp =
+ dst != src ? dst : i.ToSimd128Register(instr->TempAt(0));
+
+ // src = AAaa ... AAaa
+ // tmp = 0F0F ... 0F0F (shift=4)
+ I8x16_SPLAT(tmp, kScratchDoubleReg, 0xFFU >> shift);
+
+ // dst = src & tmp
+ // => 0A0a ... 0A0a
+ __ vpand(dst, src, tmp);
+
+ // dst = dst << shift
+ // => A0a0 ... A0a0 (shift=4)
+ __ vpslld(dst, dst, shift);
+ break;
+ }
+ case kSSEI8x16ShrS: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ XMMRegister src = i.InputSimd128Register(0);
+ int8_t shift = i.InputInt8(1) & 0x7;
+ XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+
+ // I16x8 view of I8x16
+ // src = AAaa AAaa ... AAaa AAaa
+
+ // tmp = aa00 aa00 ... aa00 aa00
+ __ movaps(tmp, src);
+ __ Move(kScratchDoubleReg, static_cast<uint32_t>(0xff00));
+ __ psllw(tmp, 8);
+
+ // src = I16x8ShrS(src, shift)
+ // => SAAa SAAa ... SAAa SAAa (shift=4)
+ __ pshuflw(kScratchDoubleReg, kScratchDoubleReg, 0x0);
+ __ psraw(src, shift);
+
+ // tmp = I16x8ShrS(tmp, shift)
+ // => Saa0 Saa0 ... Saa0 Saa0 (shift=4)
+ __ pshufd(kScratchDoubleReg, kScratchDoubleReg, 0x0);
+ __ psraw(tmp, shift);
+
+ // src = I16x8And(src, 0xff00)
+ // => SA00 SA00 ... SA00 SA00
+ __ pand(src, kScratchDoubleReg);
+
+ // tmp = I16x8ShrU(tmp, 8)
+ // => 00Sa 00Sa ... 00Sa 00Sa (shift=4)
+ __ psrlw(tmp, 8);
+
+ // src = I16x8Or(src, tmp)
+ // => SASa SASa ... SASa SASa (shift=4)
+ __ por(src, tmp);
+ break;
+ }
+ case kAVXI8x16ShrS: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ int8_t shift = i.InputInt8(1) & 0x7;
+ XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+
+ // I16x8 view of I8x16
+ // src = AAaa AAaa ... AAaa AAaa
+
+ // tmp = aa00 aa00 ... aa00 aa00
+ __ Move(kScratchDoubleReg, static_cast<uint32_t>(0xff00));
+ __ vpsllw(tmp, src, 8);
+
+ // dst = I16x8ShrS(src, shift)
+ // => SAAa SAAa ... SAAa SAAa (shift=4)
+ __ vpshuflw(kScratchDoubleReg, kScratchDoubleReg, 0x0);
+ __ vpsraw(dst, src, shift);
+
+ // tmp = I16x8ShrS(tmp, shift)
+ // => Saa0 Saa0 ... Saa0 Saa0 (shift=4)
+ __ vpshufd(kScratchDoubleReg, kScratchDoubleReg, 0x0);
+ __ vpsraw(tmp, tmp, shift);
+
+ // dst = I16x8And(dst, 0xff00)
+ // => SA00 SA00 ... SA00 SA00
+ __ vpand(dst, dst, kScratchDoubleReg);
+
+ // tmp = I16x8ShrU(tmp, 8)
+ // => 00Sa 00Sa ... 00Sa 00Sa (shift=4)
+ __ vpsrlw(tmp, tmp, 8);
+
+ // dst = I16x8Or(dst, tmp)
+ // => SASa SASa ... SASa SASa (shift=4)
+ __ vpor(dst, dst, tmp);
+ break;
+ }
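SSE has no per-byte shift instruction, so the kSSEI8x16Shl/kAVXI8x16Shl sequences above synthesize one: mask away the bits that would spill into the neighbouring byte, then perform an ordinary 32-bit shift (I8x16_SPLAT builds the per-byte mask). The same trick on one 32-bit lane group, as a plain C++ sketch rather than SIMD:

#include <cstdint>

// Shift each of the four bytes packed in `bytes` left by `shift` (0..7).
uint32_t PerByteShl(uint32_t bytes, int shift) {
  uint32_t keep = 0xFFu >> shift;      // e.g. 0x0F for shift == 4
  uint32_t mask = keep * 0x01010101u;  // splat the byte mask across the word
  return (bytes & mask) << shift;      // no bit crosses a byte boundary now
}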
case kSSEI8x16Add: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ paddb(i.OutputSimd128Register(), i.InputOperand(1));
@@ -2415,6 +2591,88 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
+ case kSSEI8x16Mul: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ XMMRegister left = i.InputSimd128Register(0);
+ XMMRegister right = i.InputSimd128Register(1);
+ XMMRegister t0 = i.ToSimd128Register(instr->TempAt(0));
+ XMMRegister t1 = i.ToSimd128Register(instr->TempAt(1));
+
+ // I16x8 view of I8x16
+ // left = AAaa AAaa ... AAaa AAaa
+ // right= BBbb BBbb ... BBbb BBbb
+
+ // t0 = 00AA 00AA ... 00AA 00AA
+ // t1 = 00BB 00BB ... 00BB 00BB
+ __ movaps(t0, left);
+ __ movaps(t1, right);
+ __ Move(kScratchDoubleReg, static_cast<uint32_t>(0x00ff));
+ __ psrlw(t0, 8);
+ __ psrlw(t1, 8);
+
+ // left = I16x8Mul(left, right)
+ // => __pp __pp ... __pp __pp
+ // t0 = I16x8Mul(t0, t1)
+ // => __PP __PP ... __PP __PP
+ __ pshuflw(kScratchDoubleReg, kScratchDoubleReg, 0x0);
+ __ pmullw(t0, t1);
+ __ pmullw(left, right);
+ __ pshufd(kScratchDoubleReg, kScratchDoubleReg, 0x0);
+
+ // t0 = I16x8Shl(t0, 8)
+ // => PP00 PP00 ... PP00 PP00
+ __ psllw(t0, 8);
+
+ // left = I16x8And(left, 0x00ff)
+ // => 00pp 00pp ... 00pp 00pp
+ __ pand(left, kScratchDoubleReg);
+
+ // left = I16x8Or(left, t0)
+ // => PPpp PPpp ... PPpp PPpp
+ __ por(left, t0);
+ break;
+ }
+ case kAVXI8x16Mul: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister left = i.InputSimd128Register(0);
+ XMMRegister right = i.InputSimd128Register(1);
+ XMMRegister t0 = i.ToSimd128Register(instr->TempAt(0));
+ XMMRegister t1 = i.ToSimd128Register(instr->TempAt(1));
+
+ // I16x8 view of I8x16
+ // left = AAaa AAaa ... AAaa AAaa
+ // right= BBbb BBbb ... BBbb BBbb
+
+ // t0 = 00AA 00AA ... 00AA 00AA
+ // t1 = 00BB 00BB ... 00BB 00BB
+ __ Move(kScratchDoubleReg, static_cast<uint32_t>(0x00ff));
+ __ vpsrlw(t0, left, 8);
+ __ vpsrlw(t1, right, 8);
+
+ // dst = I16x8Mul(left, right)
+ // => __pp __pp ... __pp __pp
+ __ vpshuflw(kScratchDoubleReg, kScratchDoubleReg, 0x0);
+ __ vpmullw(dst, left, right);
+
+ // t0 = I16x8Mul(t0, t1)
+ // => __PP __PP ... __PP __PP
+ __ vpmullw(t0, t0, t1);
+ __ vpshufd(kScratchDoubleReg, kScratchDoubleReg, 0x0);
+
+ // t0 = I16x8Shl(t0, 8)
+ // => PP00 PP00 ... PP00 PP00
+ __ vpsllw(t0, t0, 8);
+
+ // dst = I16x8And(dst, 0x00ff)
+ // => 00pp 00pp ... 00pp 00pp
+ __ vpand(dst, dst, kScratchDoubleReg);
+
+ // dst = I16x8Or(dst, t0)
+ // => PPpp PPpp ... PPpp PPpp
+ __ vpor(dst, dst, t0);
+ break;
+ }
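There is also no 8x16 multiply in SSE, so kSSEI8x16Mul/kAVXI8x16Mul multiply the high and low bytes separately with 16-bit multiplies and recombine the low byte of each product. The same computation on a single 16-bit lane, as a scalar sketch rather than the emitted code:

#include <cstdint>

// `a` and `b` each hold two packed bytes; the result holds the wrapping
// 8-bit product of the high bytes and of the low bytes.
uint16_t PerBytePackedMul(uint16_t a, uint16_t b) {
  uint16_t hi = static_cast<uint16_t>((a >> 8) * (b >> 8));  // "__PP"
  uint16_t lo = static_cast<uint16_t>(a * b);                // "__pp"
  return static_cast<uint16_t>((hi << 8) | (lo & 0x00FFu));  // "PPpp"
}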
case kSSEI8x16MinS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
@@ -2516,6 +2774,48 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
+ case kSSEI8x16ShrU: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ XMMRegister src = i.InputSimd128Register(0);
+ int8_t shift = i.InputInt8(1) & 0x7;
+ XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+
+ // src = AAaa ... AAaa
+ // tmp = F0F0 ... F0F0 (shift=4)
+
+      I8x16_SPLAT(tmp, kScratchDoubleReg, 0xFFU << shift);  // no byte cast needed
+
+ // src = src & tmp
+ // => A0a0 ... A0a0
+ __ pand(src, tmp);
+
+ // src = src >> shift
+ // => 0A0a ... 0A0a (shift=4)
+ __ psrld(src, shift);
+ break;
+ }
+ case kAVXI8x16ShrU: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ int8_t shift = i.InputInt8(1) & 0x7;
+ XMMRegister tmp =
+ dst != src ? dst : i.ToSimd128Register(instr->TempAt(0));
+
+ // src = AAaa ... AAaa
+ // tmp = F0F0 ... F0F0 (shift=4)
+ I8x16_SPLAT(tmp, kScratchDoubleReg, 0xFFU << shift);
+
+ // src = src & tmp
+ // => A0a0 ... A0a0
+ __ vpand(dst, src, tmp);
+
+ // dst = dst >> shift
+ // => 0A0a ... 0A0a (shift=4)
+ __ vpsrld(dst, dst, shift);
+ break;
+ }
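The unsigned right shift is the mirror image of the left-shift trick: keep only the bits that stay inside their byte (0xFF << shift per byte) and then shift the whole 32-bit lane right. As a scalar sketch:

#include <cstdint>

uint32_t PerByteShrU(uint32_t bytes, int shift) {
  uint32_t keep = (0xFFu << shift) & 0xFFu;  // e.g. 0xF0 for shift == 4
  uint32_t mask = keep * 0x01010101u;        // splat across the word
  return (bytes & mask) >> shift;            // nothing crosses a byte boundary
}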
+#undef I8x16_SPLAT
case kSSEI8x16MinU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pminub(i.OutputSimd128Register(), i.InputOperand(1));
@@ -2631,87 +2931,106 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
+ case kSSES128Select: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ // Mask used here is stored in dst.
+ XMMRegister dst = i.OutputSimd128Register();
+ __ movaps(kScratchDoubleReg, i.InputSimd128Register(1));
+ __ xorps(kScratchDoubleReg, i.InputSimd128Register(2));
+ __ andps(dst, kScratchDoubleReg);
+ __ xorps(dst, i.InputSimd128Register(2));
+ break;
+ }
+ case kAVXS128Select: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister dst = i.OutputSimd128Register();
+ __ vxorps(kScratchDoubleReg, i.InputSimd128Register(2),
+ i.InputOperand(1));
+ __ vandps(dst, kScratchDoubleReg, i.InputOperand(0));
+ __ vxorps(dst, dst, i.InputSimd128Register(2));
+ break;
+ }
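S128Select is a per-bit select: where the mask (operand 0) has a 1, take the bit from operand 1, otherwise from operand 2. The sequences above use the three-operation form ((a ^ b) & mask) ^ b instead of the four-operation (a & mask) | (b & ~mask). A 32-bit scalar sketch of the identity:

#include <cstdint>

// Per-bit select, as done for the 128-bit vectors above but shown on 32 bits.
uint32_t BitSelect(uint32_t mask, uint32_t if_one, uint32_t if_zero) {
  return ((if_one ^ if_zero) & mask) ^ if_zero;
}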
case kIA32StackCheck: {
ExternalReference const stack_limit =
ExternalReference::address_of_stack_limit(__ isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
break;
}
- case kAtomicExchangeInt8: {
+ case kWord32AtomicExchangeInt8: {
__ xchg_b(i.InputRegister(0), i.MemoryOperand(1));
__ movsx_b(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kAtomicExchangeUint8: {
+ case kWord32AtomicExchangeUint8: {
__ xchg_b(i.InputRegister(0), i.MemoryOperand(1));
__ movzx_b(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kAtomicExchangeInt16: {
+ case kWord32AtomicExchangeInt16: {
__ xchg_w(i.InputRegister(0), i.MemoryOperand(1));
__ movsx_w(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kAtomicExchangeUint16: {
+ case kWord32AtomicExchangeUint16: {
__ xchg_w(i.InputRegister(0), i.MemoryOperand(1));
__ movzx_w(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kAtomicExchangeWord32: {
+ case kWord32AtomicExchangeWord32: {
__ xchg(i.InputRegister(0), i.MemoryOperand(1));
break;
}
- case kAtomicCompareExchangeInt8: {
+ case kWord32AtomicCompareExchangeInt8: {
__ lock();
__ cmpxchg_b(i.MemoryOperand(2), i.InputRegister(1));
__ movsx_b(eax, eax);
break;
}
- case kAtomicCompareExchangeUint8: {
+ case kWord32AtomicCompareExchangeUint8: {
__ lock();
__ cmpxchg_b(i.MemoryOperand(2), i.InputRegister(1));
__ movzx_b(eax, eax);
break;
}
- case kAtomicCompareExchangeInt16: {
+ case kWord32AtomicCompareExchangeInt16: {
__ lock();
__ cmpxchg_w(i.MemoryOperand(2), i.InputRegister(1));
__ movsx_w(eax, eax);
break;
}
- case kAtomicCompareExchangeUint16: {
+ case kWord32AtomicCompareExchangeUint16: {
__ lock();
__ cmpxchg_w(i.MemoryOperand(2), i.InputRegister(1));
__ movzx_w(eax, eax);
break;
}
- case kAtomicCompareExchangeWord32: {
+ case kWord32AtomicCompareExchangeWord32: {
__ lock();
__ cmpxchg(i.MemoryOperand(2), i.InputRegister(1));
break;
}
#define ATOMIC_BINOP_CASE(op, inst) \
- case kAtomic##op##Int8: { \
+ case kWord32Atomic##op##Int8: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
__ movsx_b(eax, eax); \
break; \
} \
- case kAtomic##op##Uint8: { \
+ case kWord32Atomic##op##Uint8: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
__ movzx_b(eax, eax); \
break; \
} \
- case kAtomic##op##Int16: { \
+ case kWord32Atomic##op##Int16: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
__ movsx_w(eax, eax); \
break; \
} \
- case kAtomic##op##Uint16: { \
+ case kWord32Atomic##op##Uint16: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
__ movzx_w(eax, eax); \
break; \
} \
- case kAtomic##op##Word32: { \
+ case kWord32Atomic##op##Word32: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov, cmpxchg); \
break; \
}
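Each kWord32Atomic<op> case expands via ASSEMBLE_ATOMIC_BINOP to a load followed by a lock cmpxchg retry loop that applies the operation and publishes the result only if no other thread wrote the cell in between. A scalar C++ sketch of that pattern (illustrative, not the emitted code):

#include <atomic>
#include <cstdint>

// Atomically apply the operation (here: add) and return the previous value.
uint32_t AtomicAdd(std::atomic<uint32_t>* cell, uint32_t value) {
  uint32_t old = cell->load();
  // compare_exchange_weak reloads `old` on failure, so the loop retries with
  // the freshest value, much like a lock cmpxchg retry loop.
  while (!cell->compare_exchange_weak(old, old + value)) {
  }
  return old;
}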
@@ -2721,14 +3040,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, or_)
ATOMIC_BINOP_CASE(Xor, xor_)
#undef ATOMIC_BINOP_CASE
- case kAtomicLoadInt8:
- case kAtomicLoadUint8:
- case kAtomicLoadInt16:
- case kAtomicLoadUint16:
- case kAtomicLoadWord32:
- case kAtomicStoreWord8:
- case kAtomicStoreWord16:
- case kAtomicStoreWord32:
+ case kWord32AtomicLoadInt8:
+ case kWord32AtomicLoadUint8:
+ case kWord32AtomicLoadInt16:
+ case kWord32AtomicLoadUint16:
+ case kWord32AtomicLoadWord32:
+ case kWord32AtomicStoreWord8:
+ case kWord32AtomicStoreWord16:
+ case kWord32AtomicStoreWord32:
UNREACHABLE(); // Won't be generated by instruction selector.
break;
}
@@ -2798,6 +3117,11 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ jmp(flabel);
}
+void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
+ Instruction* instr) {
+ UNREACHABLE();
+}
+
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -2843,8 +3167,8 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ isolate()),
0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
- CallDescriptor* descriptor = gen_->linkage()->GetIncomingDescriptor();
- size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
+ auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
+ size_t pop_size = call_descriptor->StackParameterCount() * kPointerSize;
       // Use ecx as a scratch register; we return immediately anyway.
__ Ret(static_cast<int>(pop_size), ecx);
} else {
@@ -3070,8 +3394,8 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
// ^ esp ^ ebp
void CodeGenerator::FinishFrame(Frame* frame) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- const RegList saves = descriptor->CalleeSavedRegisters();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
if (saves != 0) { // Save callee-saved registers.
DCHECK(!info()->is_osr());
int pushed = 0;
@@ -3084,14 +3408,14 @@ void CodeGenerator::FinishFrame(Frame* frame) {
}
void CodeGenerator::AssembleConstructFrame() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
__ push(ebp);
__ mov(ebp, esp);
- } else if (descriptor->IsJSFunctionCall()) {
+ } else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
- if (descriptor->PushArgumentCount()) {
+ if (call_descriptor->PushArgumentCount()) {
__ push(kJavaScriptCallArgCountRegister);
}
} else {
@@ -3099,8 +3423,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
- int shrink_slots =
- frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
+ int shrink_slots = frame()->GetTotalFrameSlotCount() -
+ call_descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -3115,7 +3439,7 @@ void CodeGenerator::AssembleConstructFrame() {
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
}
- const RegList saves = descriptor->CalleeSavedRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
if (shrink_slots > 0) {
if (info()->IsWasm() && shrink_slots > 128) {
// For WebAssembly functions with big frames we have to do the stack
@@ -3174,9 +3498,9 @@ void CodeGenerator::AssembleConstructFrame() {
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
- const RegList saves = descriptor->CalleeSavedRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
// Restore registers.
if (saves != 0) {
const int returns = frame()->GetReturnSlotCount();
@@ -3191,10 +3515,10 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
// Might need ecx for scratch if pop_size is too big or if there is a variable
// pop count.
- DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & ecx.bit());
- size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
+ DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & ecx.bit());
+ size_t pop_size = call_descriptor->StackParameterCount() * kPointerSize;
IA32OperandConverter g(this, nullptr);
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
// Canonicalize JSFunction return sites for now if they always have the same
@@ -3211,8 +3535,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
AssembleDeconstructFrame();
}
}
- DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & edx.bit());
- DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & ecx.bit());
+ DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & edx.bit());
+ DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & ecx.bit());
if (pop->IsImmediate()) {
DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
pop_size += g.ToConstant(pop).ToInt32() * kPointerSize;
@@ -3231,119 +3555,129 @@ void CodeGenerator::FinishCode() {}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
IA32OperandConverter g(this, nullptr);
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister()) {
- DCHECK(destination->IsRegister() || destination->IsStackSlot());
- Register src = g.ToRegister(source);
- Operand dst = g.ToOperand(destination);
- __ mov(dst, src);
- } else if (source->IsStackSlot()) {
- DCHECK(destination->IsRegister() || destination->IsStackSlot());
- Operand src = g.ToOperand(source);
- if (destination->IsRegister()) {
- Register dst = g.ToRegister(destination);
- __ mov(dst, src);
- } else {
- Operand dst = g.ToOperand(destination);
- __ push(src);
- __ pop(dst);
- }
- } else if (source->IsConstant()) {
- Constant src_constant = g.ToConstant(source);
- if (src_constant.type() == Constant::kHeapObject) {
- Handle<HeapObject> src = src_constant.ToHeapObject();
- if (destination->IsRegister()) {
- Register dst = g.ToRegister(destination);
- __ Move(dst, src);
+ // Dispatch on the source and destination operand kinds.
+ switch (MoveType::InferMove(source, destination)) {
+ case MoveType::kRegisterToRegister:
+ if (source->IsRegister()) {
+ __ mov(g.ToRegister(destination), g.ToRegister(source));
} else {
- DCHECK(destination->IsStackSlot());
- Operand dst = g.ToOperand(destination);
- __ mov(dst, src);
+ DCHECK(source->IsFPRegister());
+ __ movaps(g.ToDoubleRegister(destination), g.ToDoubleRegister(source));
}
- } else if (destination->IsRegister()) {
- Register dst = g.ToRegister(destination);
- __ Move(dst, g.ToImmediate(source));
- } else if (destination->IsStackSlot()) {
+ return;
+ case MoveType::kRegisterToStack: {
Operand dst = g.ToOperand(destination);
- __ Move(dst, g.ToImmediate(source));
- } else if (src_constant.type() == Constant::kFloat32) {
- // TODO(turbofan): Can we do better here?
- uint32_t src = src_constant.ToFloat32AsInt();
- if (destination->IsFPRegister()) {
- XMMRegister dst = g.ToDoubleRegister(destination);
- __ Move(dst, src);
+ if (source->IsRegister()) {
+ __ mov(dst, g.ToRegister(source));
} else {
- DCHECK(destination->IsFPStackSlot());
- Operand dst = g.ToOperand(destination);
- __ Move(dst, Immediate(src));
+ DCHECK(source->IsFPRegister());
+ XMMRegister src = g.ToDoubleRegister(source);
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kFloat32) {
+ __ movss(dst, src);
+ } else if (rep == MachineRepresentation::kFloat64) {
+ __ movsd(dst, src);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ __ movups(dst, src);
+ }
}
- } else {
- DCHECK_EQ(Constant::kFloat64, src_constant.type());
- uint64_t src = src_constant.ToFloat64().AsUint64();
- uint32_t lower = static_cast<uint32_t>(src);
- uint32_t upper = static_cast<uint32_t>(src >> 32);
- if (destination->IsFPRegister()) {
- XMMRegister dst = g.ToDoubleRegister(destination);
- __ Move(dst, src);
+ return;
+ }
+ case MoveType::kStackToRegister: {
+ Operand src = g.ToOperand(source);
+ if (source->IsStackSlot()) {
+ __ mov(g.ToRegister(destination), src);
} else {
- DCHECK(destination->IsFPStackSlot());
- Operand dst0 = g.ToOperand(destination);
- Operand dst1 = g.ToOperand(destination, kPointerSize);
- __ Move(dst0, Immediate(lower));
- __ Move(dst1, Immediate(upper));
+ DCHECK(source->IsFPStackSlot());
+ XMMRegister dst = g.ToDoubleRegister(destination);
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kFloat32) {
+ __ movss(dst, src);
+ } else if (rep == MachineRepresentation::kFloat64) {
+ __ movsd(dst, src);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ __ movups(dst, src);
+ }
}
+ return;
}
- } else if (source->IsFPRegister()) {
- XMMRegister src = g.ToDoubleRegister(source);
- if (destination->IsFPRegister()) {
- XMMRegister dst = g.ToDoubleRegister(destination);
- __ movaps(dst, src);
- } else {
- DCHECK(destination->IsFPStackSlot());
+ case MoveType::kStackToStack: {
+ Operand src = g.ToOperand(source);
Operand dst = g.ToOperand(destination);
- MachineRepresentation rep =
- LocationOperand::cast(source)->representation();
- if (rep == MachineRepresentation::kFloat64) {
- __ movsd(dst, src);
- } else if (rep == MachineRepresentation::kFloat32) {
- __ movss(dst, src);
- } else {
- DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- __ movups(dst, src);
- }
- }
- } else if (source->IsFPStackSlot()) {
- DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
- Operand src = g.ToOperand(source);
- MachineRepresentation rep = LocationOperand::cast(source)->representation();
- if (destination->IsFPRegister()) {
- XMMRegister dst = g.ToDoubleRegister(destination);
- if (rep == MachineRepresentation::kFloat64) {
- __ movsd(dst, src);
- } else if (rep == MachineRepresentation::kFloat32) {
- __ movss(dst, src);
- } else {
- DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- __ movups(dst, src);
+ if (source->IsStackSlot()) {
+ __ push(src);
+ __ pop(dst);
+ } else {
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kFloat32) {
+ __ movss(kScratchDoubleReg, src);
+ __ movss(dst, kScratchDoubleReg);
+ } else if (rep == MachineRepresentation::kFloat64) {
+ __ movsd(kScratchDoubleReg, src);
+ __ movsd(dst, kScratchDoubleReg);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ __ movups(kScratchDoubleReg, src);
+ __ movups(dst, kScratchDoubleReg);
+ }
}
- } else {
+ return;
+ }
+ case MoveType::kConstantToRegister: {
+ Constant src = g.ToConstant(source);
+ if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ if (src.type() == Constant::kHeapObject) {
+ __ Move(dst, src.ToHeapObject());
+ } else {
+ __ Move(dst, g.ToImmediate(source));
+ }
+ } else {
+ DCHECK(destination->IsFPRegister());
+ XMMRegister dst = g.ToDoubleRegister(destination);
+ if (src.type() == Constant::kFloat32) {
+ // TODO(turbofan): Can we do better here?
+ __ Move(dst, src.ToFloat32AsInt());
+ } else {
+ DCHECK_EQ(src.type(), Constant::kFloat64);
+ __ Move(dst, src.ToFloat64().AsUint64());
+ }
+ }
+ return;
+ }
+ case MoveType::kConstantToStack: {
+ Constant src = g.ToConstant(source);
Operand dst = g.ToOperand(destination);
- if (rep == MachineRepresentation::kFloat64) {
- __ movsd(kScratchDoubleReg, src);
- __ movsd(dst, kScratchDoubleReg);
- } else if (rep == MachineRepresentation::kFloat32) {
- __ movss(kScratchDoubleReg, src);
- __ movss(dst, kScratchDoubleReg);
+ if (destination->IsStackSlot()) {
+ if (src.type() == Constant::kHeapObject) {
+ __ mov(dst, src.ToHeapObject());
+ } else {
+ __ Move(dst, g.ToImmediate(source));
+ }
} else {
- DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- __ movups(kScratchDoubleReg, src);
- __ movups(dst, kScratchDoubleReg);
+ DCHECK(destination->IsFPStackSlot());
+ if (src.type() == Constant::kFloat32) {
+ __ Move(dst, Immediate(src.ToFloat32AsInt()));
+ } else {
+ DCHECK_EQ(src.type(), Constant::kFloat64);
+ uint64_t constant_value = src.ToFloat64().AsUint64();
+ uint32_t lower = static_cast<uint32_t>(constant_value);
+ uint32_t upper = static_cast<uint32_t>(constant_value >> 32);
+ Operand dst0 = dst;
+ Operand dst1 = g.ToOperand(destination, kPointerSize);
+ __ Move(dst0, Immediate(lower));
+ __ Move(dst1, Immediate(upper));
+ }
}
+ return;
}
- } else {
- UNREACHABLE();
}
+ UNREACHABLE();
}
@@ -3352,94 +3686,106 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
IA32OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
- if (source->IsRegister() && destination->IsRegister()) {
- // Register-register.
- Register src = g.ToRegister(source);
- Register dst = g.ToRegister(destination);
- __ push(src);
- __ mov(src, dst);
- __ pop(dst);
- } else if (source->IsRegister() && destination->IsStackSlot()) {
- // Register-memory.
- Register src = g.ToRegister(source);
- __ push(src);
- frame_access_state()->IncreaseSPDelta(1);
- Operand dst = g.ToOperand(destination);
- __ mov(src, dst);
- frame_access_state()->IncreaseSPDelta(-1);
- dst = g.ToOperand(destination);
- __ pop(dst);
- } else if (source->IsStackSlot() && destination->IsStackSlot()) {
- // Memory-memory.
- Operand dst1 = g.ToOperand(destination);
- __ push(dst1);
- frame_access_state()->IncreaseSPDelta(1);
- Operand src1 = g.ToOperand(source);
- __ push(src1);
- Operand dst2 = g.ToOperand(destination);
- __ pop(dst2);
- frame_access_state()->IncreaseSPDelta(-1);
- Operand src2 = g.ToOperand(source);
- __ pop(src2);
- } else if (source->IsFPRegister() && destination->IsFPRegister()) {
- // XMM register-register swap.
- XMMRegister src = g.ToDoubleRegister(source);
- XMMRegister dst = g.ToDoubleRegister(destination);
- __ movaps(kScratchDoubleReg, src);
- __ movaps(src, dst);
- __ movaps(dst, kScratchDoubleReg);
- } else if (source->IsFPRegister() && destination->IsFPStackSlot()) {
- // XMM register-memory swap.
- XMMRegister reg = g.ToDoubleRegister(source);
- Operand other = g.ToOperand(destination);
- MachineRepresentation rep = LocationOperand::cast(source)->representation();
- if (rep == MachineRepresentation::kFloat64) {
- __ movsd(kScratchDoubleReg, other);
- __ movsd(other, reg);
- __ movaps(reg, kScratchDoubleReg);
- } else if (rep == MachineRepresentation::kFloat32) {
- __ movss(kScratchDoubleReg, other);
- __ movss(other, reg);
- __ movaps(reg, kScratchDoubleReg);
- } else {
- DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- __ movups(kScratchDoubleReg, other);
- __ movups(other, reg);
- __ movups(reg, kScratchDoubleReg);
- }
- } else if (source->IsFPStackSlot() && destination->IsFPStackSlot()) {
- // Double-width memory-to-memory.
- Operand src0 = g.ToOperand(source);
- Operand dst0 = g.ToOperand(destination);
- MachineRepresentation rep = LocationOperand::cast(source)->representation();
- if (rep == MachineRepresentation::kFloat64) {
- __ movsd(kScratchDoubleReg, dst0); // Save dst in scratch register.
- __ push(src0); // Then use stack to copy src to destination.
- __ pop(dst0);
- __ push(g.ToOperand(source, kPointerSize));
- __ pop(g.ToOperand(destination, kPointerSize));
- __ movsd(src0, kScratchDoubleReg);
- } else if (rep == MachineRepresentation::kFloat32) {
- __ movss(kScratchDoubleReg, dst0); // Save dst in scratch register.
- __ push(src0); // Then use stack to copy src to destination.
- __ pop(dst0);
- __ movss(src0, kScratchDoubleReg);
- } else {
- DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- __ movups(kScratchDoubleReg, dst0); // Save dst in scratch register.
- __ push(src0); // Then use stack to copy src to destination.
- __ pop(dst0);
- __ push(g.ToOperand(source, kPointerSize));
- __ pop(g.ToOperand(destination, kPointerSize));
- __ push(g.ToOperand(source, 2 * kPointerSize));
- __ pop(g.ToOperand(destination, 2 * kPointerSize));
- __ push(g.ToOperand(source, 3 * kPointerSize));
- __ pop(g.ToOperand(destination, 3 * kPointerSize));
- __ movups(src0, kScratchDoubleReg);
+ switch (MoveType::InferSwap(source, destination)) {
+ case MoveType::kRegisterToRegister: {
+ if (source->IsRegister()) {
+ Register src = g.ToRegister(source);
+ Register dst = g.ToRegister(destination);
+ __ push(src);
+ __ mov(src, dst);
+ __ pop(dst);
+ } else {
+ DCHECK(source->IsFPRegister());
+ XMMRegister src = g.ToDoubleRegister(source);
+ XMMRegister dst = g.ToDoubleRegister(destination);
+ __ movaps(kScratchDoubleReg, src);
+ __ movaps(src, dst);
+ __ movaps(dst, kScratchDoubleReg);
+ }
+ return;
}
- } else {
- // No other combinations are possible.
- UNREACHABLE();
+ case MoveType::kRegisterToStack: {
+ if (source->IsRegister()) {
+ Register src = g.ToRegister(source);
+ __ push(src);
+ frame_access_state()->IncreaseSPDelta(1);
+ Operand dst = g.ToOperand(destination);
+ __ mov(src, dst);
+ frame_access_state()->IncreaseSPDelta(-1);
+ dst = g.ToOperand(destination);
+ __ pop(dst);
+ } else {
+ DCHECK(source->IsFPRegister());
+ XMMRegister src = g.ToDoubleRegister(source);
+ Operand dst = g.ToOperand(destination);
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kFloat32) {
+ __ movss(kScratchDoubleReg, dst);
+ __ movss(dst, src);
+ __ movaps(src, kScratchDoubleReg);
+ } else if (rep == MachineRepresentation::kFloat64) {
+ __ movsd(kScratchDoubleReg, dst);
+ __ movsd(dst, src);
+ __ movaps(src, kScratchDoubleReg);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ __ movups(kScratchDoubleReg, dst);
+ __ movups(dst, src);
+ __ movups(src, kScratchDoubleReg);
+ }
+ }
+ return;
+ }
+ case MoveType::kStackToStack: {
+ if (source->IsStackSlot()) {
+ Operand dst1 = g.ToOperand(destination);
+ __ push(dst1);
+ frame_access_state()->IncreaseSPDelta(1);
+ Operand src1 = g.ToOperand(source);
+ __ push(src1);
+ Operand dst2 = g.ToOperand(destination);
+ __ pop(dst2);
+ frame_access_state()->IncreaseSPDelta(-1);
+ Operand src2 = g.ToOperand(source);
+ __ pop(src2);
+ } else {
+ DCHECK(source->IsFPStackSlot());
+ Operand src0 = g.ToOperand(source);
+ Operand dst0 = g.ToOperand(destination);
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kFloat32) {
+ __ movss(kScratchDoubleReg, dst0); // Save dst in scratch register.
+ __ push(src0); // Then use stack to copy src to destination.
+ __ pop(dst0);
+ __ movss(src0, kScratchDoubleReg);
+ } else if (rep == MachineRepresentation::kFloat64) {
+ __ movsd(kScratchDoubleReg, dst0); // Save dst in scratch register.
+ __ push(src0); // Then use stack to copy src to destination.
+ __ pop(dst0);
+ __ push(g.ToOperand(source, kPointerSize));
+ __ pop(g.ToOperand(destination, kPointerSize));
+ __ movsd(src0, kScratchDoubleReg);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ __ movups(kScratchDoubleReg, dst0); // Save dst in scratch register.
+ __ push(src0); // Then use stack to copy src to destination.
+ __ pop(dst0);
+ __ push(g.ToOperand(source, kPointerSize));
+ __ pop(g.ToOperand(destination, kPointerSize));
+ __ push(g.ToOperand(source, 2 * kPointerSize));
+ __ pop(g.ToOperand(destination, 2 * kPointerSize));
+ __ push(g.ToOperand(source, 3 * kPointerSize));
+ __ pop(g.ToOperand(destination, 3 * kPointerSize));
+ __ movups(src0, kScratchDoubleReg);
+ }
+ }
+ return;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
}
@@ -3451,6 +3797,13 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
}
#undef __
+#undef kScratchDoubleReg
+#undef ASSEMBLE_COMPARE
+#undef ASSEMBLE_IEEE754_BINOP
+#undef ASSEMBLE_IEEE754_UNOP
+#undef ASSEMBLE_BINOP
+#undef ASSEMBLE_ATOMIC_BINOP
+#undef ASSEMBLE_MOVX
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
index a17d9f06ce..55833df4d4 100644
--- a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
@@ -229,6 +229,10 @@ namespace compiler {
V(IA32I8x16ExtractLane) \
V(SSEI8x16ReplaceLane) \
V(AVXI8x16ReplaceLane) \
+ V(SSEI8x16Shl) \
+ V(AVXI8x16Shl) \
+ V(SSEI8x16ShrS) \
+ V(AVXI8x16ShrS) \
V(IA32I8x16Neg) \
V(SSEI8x16Add) \
V(AVXI8x16Add) \
@@ -238,6 +242,8 @@ namespace compiler {
V(AVXI8x16Sub) \
V(SSEI8x16SubSaturateS) \
V(AVXI8x16SubSaturateS) \
+ V(SSEI8x16Mul) \
+ V(AVXI8x16Mul) \
V(SSEI8x16MinS) \
V(AVXI8x16MinS) \
V(SSEI8x16MaxS) \
@@ -254,6 +260,8 @@ namespace compiler {
V(AVXI8x16AddSaturateU) \
V(SSEI8x16SubSaturateU) \
V(AVXI8x16SubSaturateU) \
+ V(SSEI8x16ShrU) \
+ V(AVXI8x16ShrU) \
V(SSEI8x16MinU) \
V(AVXI8x16MinU) \
V(SSEI8x16MaxU) \
@@ -270,7 +278,9 @@ namespace compiler {
V(SSES128Or) \
V(AVXS128Or) \
V(SSES128Xor) \
- V(AVXS128Xor)
+ V(AVXS128Xor) \
+ V(SSES128Select) \
+ V(AVXS128Select)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
index db43c1ed1c..3c2207eee2 100644
--- a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -212,6 +212,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSEI8x16ReplaceLane:
case kAVXI8x16ReplaceLane:
case kIA32I8x16Neg:
+ case kSSEI8x16Shl:
+ case kAVXI8x16Shl:
+ case kSSEI8x16ShrS:
+ case kAVXI8x16ShrS:
case kSSEI8x16Add:
case kAVXI8x16Add:
case kSSEI8x16AddSaturateS:
@@ -220,6 +224,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXI8x16Sub:
case kSSEI8x16SubSaturateS:
case kAVXI8x16SubSaturateS:
+ case kSSEI8x16Mul:
+ case kAVXI8x16Mul:
case kSSEI8x16MinS:
case kAVXI8x16MinS:
case kSSEI8x16MaxS:
@@ -236,6 +242,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXI8x16AddSaturateU:
case kSSEI8x16SubSaturateU:
case kAVXI8x16SubSaturateU:
+ case kSSEI8x16ShrU:
+ case kAVXI8x16ShrU:
case kSSEI8x16MinU:
case kAVXI8x16MinU:
case kSSEI8x16MaxU:
@@ -253,6 +261,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXS128Or:
case kSSES128Xor:
case kAVXS128Xor:
+ case kSSES128Select:
+ case kAVXS128Select:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index d8bf250ec6..aa6e9fd607 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -77,6 +77,8 @@ class IA32OperandGenerator final : public OperandGenerator {
Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
Isolate* isolate = value->GetIsolate();
return !isolate->heap()->InNewSpace(*value);
+#else
+ return false;
#endif
}
default:
@@ -208,6 +210,20 @@ void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
}
}
+void VisitRRISimd(InstructionSelector* selector, Node* node,
+ ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
+ IA32OperandGenerator g(selector);
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
+ InstructionOperand operand1 = g.UseImmediate(OpParameter<int32_t>(node));
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+ if (selector->IsSupported(AVX)) {
+ selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1,
+ arraysize(temps), temps);
+ } else {
+ selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1,
+ arraysize(temps), temps);
+ }
+}
} // namespace
@@ -271,9 +287,15 @@ void InstructionSelector::VisitLoad(Node* node) {
AddressingMode mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(mode);
+ if (node->opcode() == IrOpcode::kPoisonedLoad) {
+ CHECK_EQ(load_poisoning_, LoadPoisoning::kDoPoison);
+ code |= MiscField::encode(kMemoryAccessPoisoned);
+ }
Emit(code, 1, outputs, input_count, inputs);
}
+void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -710,27 +732,29 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
VisitShift(this, node, kIA32Ror);
}
-#define RO_OP_LIST(V) \
- V(Word32Clz, kIA32Lzcnt) \
- V(Word32Ctz, kIA32Tzcnt) \
- V(Word32Popcnt, kIA32Popcnt) \
- V(ChangeFloat32ToFloat64, kSSEFloat32ToFloat64) \
- V(RoundInt32ToFloat32, kSSEInt32ToFloat32) \
- V(ChangeInt32ToFloat64, kSSEInt32ToFloat64) \
- V(ChangeUint32ToFloat64, kSSEUint32ToFloat64) \
- V(TruncateFloat32ToInt32, kSSEFloat32ToInt32) \
- V(TruncateFloat32ToUint32, kSSEFloat32ToUint32) \
- V(ChangeFloat64ToInt32, kSSEFloat64ToInt32) \
- V(ChangeFloat64ToUint32, kSSEFloat64ToUint32) \
- V(TruncateFloat64ToUint32, kSSEFloat64ToUint32) \
- V(TruncateFloat64ToFloat32, kSSEFloat64ToFloat32) \
- V(RoundFloat64ToInt32, kSSEFloat64ToInt32) \
- V(BitcastFloat32ToInt32, kIA32BitcastFI) \
- V(BitcastInt32ToFloat32, kIA32BitcastIF) \
- V(Float32Sqrt, kSSEFloat32Sqrt) \
- V(Float64Sqrt, kSSEFloat64Sqrt) \
- V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32) \
- V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32)
+#define RO_OP_LIST(V) \
+ V(Word32Clz, kIA32Lzcnt) \
+ V(Word32Ctz, kIA32Tzcnt) \
+ V(Word32Popcnt, kIA32Popcnt) \
+ V(ChangeFloat32ToFloat64, kSSEFloat32ToFloat64) \
+ V(RoundInt32ToFloat32, kSSEInt32ToFloat32) \
+ V(ChangeInt32ToFloat64, kSSEInt32ToFloat64) \
+ V(ChangeUint32ToFloat64, kSSEUint32ToFloat64) \
+ V(TruncateFloat32ToInt32, kSSEFloat32ToInt32) \
+ V(TruncateFloat32ToUint32, kSSEFloat32ToUint32) \
+ V(ChangeFloat64ToInt32, kSSEFloat64ToInt32) \
+ V(ChangeFloat64ToUint32, kSSEFloat64ToUint32) \
+ V(TruncateFloat64ToUint32, kSSEFloat64ToUint32) \
+ V(TruncateFloat64ToFloat32, kSSEFloat64ToFloat32) \
+ V(RoundFloat64ToInt32, kSSEFloat64ToInt32) \
+ V(BitcastFloat32ToInt32, kIA32BitcastFI) \
+ V(BitcastInt32ToFloat32, kIA32BitcastIF) \
+ V(Float32Sqrt, kSSEFloat32Sqrt) \
+ V(Float64Sqrt, kSSEFloat64Sqrt) \
+ V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32) \
+ V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32) \
+ V(SignExtendWord8ToInt32, kIA32Movsxbl) \
+ V(SignExtendWord16ToInt32, kIA32Movsxwl)
#define RR_OP_LIST(V) \
V(TruncateFloat64ToWord32, kArchTruncateDoubleToI) \
@@ -766,6 +790,7 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
}
RO_OP_LIST(RO_VISITOR)
#undef RO_VISITOR
+#undef RO_OP_LIST
#define RR_VISITOR(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -773,6 +798,7 @@ RO_OP_LIST(RO_VISITOR)
}
RR_OP_LIST(RR_VISITOR)
#undef RR_VISITOR
+#undef RR_OP_LIST
#define RRO_FLOAT_VISITOR(Name, avx, sse) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -780,6 +806,7 @@ RR_OP_LIST(RR_VISITOR)
}
RRO_FLOAT_OP_LIST(RRO_FLOAT_VISITOR)
#undef RRO_FLOAT_VISITOR
+#undef RRO_FLOAT_OP_LIST
#define FLOAT_UNOP_VISITOR(Name, avx, sse) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -787,6 +814,7 @@ RRO_FLOAT_OP_LIST(RRO_FLOAT_VISITOR)
}
FLOAT_UNOP_LIST(FLOAT_UNOP_VISITOR)
#undef FLOAT_UNOP_VISITOR
+#undef FLOAT_UNOP_LIST
void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
@@ -955,16 +983,16 @@ void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
}
void InstructionSelector::EmitPrepareArguments(
- ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
Node* node) {
IA32OperandGenerator g(this);
// Prepare for C function call.
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
InstructionOperand temps[] = {g.TempRegister()};
size_t const temp_count = arraysize(temps);
- Emit(kArchPrepareCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
+ Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
+ call_descriptor->ParameterCount())),
0, nullptr, 0, nullptr, temp_count, temps);
// Poke any stack arguments.
@@ -1015,9 +1043,9 @@ void InstructionSelector::EmitPrepareArguments(
}
}
-void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
- const CallDescriptor* descriptor,
- Node* node) {
+void InstructionSelector::EmitPrepareResults(
+ ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
+ Node* node) {
IA32OperandGenerator g(this);
int reverse_slot = 0;
@@ -1025,7 +1053,7 @@ void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
if (!output.location.IsCallerFrameSlot()) continue;
// Skip any alignment holes in nodes.
if (output.node != nullptr) {
- DCHECK(!descriptor->IsCFunctionCall());
+ DCHECK(!call_descriptor->IsCFunctionCall());
if (output.location.GetType() == MachineType::Float32()) {
MarkAsFloat32(output.node);
} else if (output.location.GetType() == MachineType::Float64()) {
@@ -1270,30 +1298,22 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
// Compare(Load(js_stack_limit), LoadStackPointer)
if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
InstructionCode opcode = cont->Encode(kIA32StackCheck);
- if (cont->IsBranch()) {
- selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
- g.Label(cont->false_block()));
- } else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->kind(),
- cont->reason(), cont->feedback(),
- cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
- selector->Emit(opcode, g.DefineAsRegister(cont->result()));
- }
+ CHECK(cont->IsBranch());
+ selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
+ g.Label(cont->false_block()));
return;
}
}
VisitWordCompare(selector, node, kIA32Cmp, cont);
}
+} // namespace
// Shared routine for word comparison with zero.
-void VisitWordCompareZero(InstructionSelector* selector, Node* user,
- Node* value, FlagsContinuation* cont) {
+void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
+ FlagsContinuation* cont) {
// Try to combine with comparisons against 0 by simply inverting the branch.
- while (value->opcode() == IrOpcode::kWord32Equal &&
- selector->CanCover(user, value)) {
+ while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
Int32BinopMatcher m(value);
if (!m.right().Is(0)) break;
@@ -1302,41 +1322,41 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
cont->Negate();
}
- if (selector->CanCover(user, value)) {
+ if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kWord32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kInt32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kUint32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kUint32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kFloat32Equal:
cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat64Equal:
cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
@@ -1348,17 +1368,17 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (result == nullptr || selector->IsDefined(result)) {
+ if (result == nullptr || IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kIA32Add, cont);
+ return VisitBinop(this, node, kIA32Add, cont);
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kIA32Sub, cont);
+ return VisitBinop(this, node, kIA32Sub, cont);
case IrOpcode::kInt32MulWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kIA32Imul, cont);
+ return VisitBinop(this, node, kIA32Imul, cont);
default:
break;
}
@@ -1366,52 +1386,17 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
}
break;
case IrOpcode::kInt32Sub:
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kWord32And:
- return VisitWordCompare(selector, value, kIA32Test, cont);
+ return VisitWordCompare(this, value, kIA32Test, cont);
default:
break;
}
}
// Continuation could not be combined with a compare, emit compare against 0.
- IA32OperandGenerator g(selector);
- VisitCompare(selector, kIA32Cmp, g.Use(value), g.TempImmediate(0), cont);
-}
-
-} // namespace
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
- BasicBlock* fbranch) {
- FlagsContinuation cont(kNotEqual, tbranch, fbranch);
- VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapUnless(Node* node,
- Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+ IA32OperandGenerator g(this);
+ VisitCompare(this, kIA32Cmp, g.Use(value), g.TempImmediate(0), cont);
}
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
@@ -1419,24 +1404,26 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
- static const size_t kMaxTableSwitchValueRange = 2 << 16;
- size_t table_space_cost = 4 + sw.value_range;
- size_t table_time_cost = 3;
- size_t lookup_space_cost = 3 + 2 * sw.case_count;
- size_t lookup_time_cost = sw.case_count;
- if (sw.case_count > 4 &&
- table_space_cost + 3 * table_time_cost <=
- lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min() &&
- sw.value_range <= kMaxTableSwitchValueRange) {
- InstructionOperand index_operand = value_operand;
- if (sw.min_value) {
- index_operand = g.TempRegister();
- Emit(kIA32Lea | AddressingModeField::encode(kMode_MRI), index_operand,
- value_operand, g.TempImmediate(-sw.min_value));
+ if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
+ size_t table_space_cost = 4 + sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 3 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 4 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value) {
+ index_operand = g.TempRegister();
+ Emit(kIA32Lea | AddressingModeField::encode(kMode_MRI), index_operand,
+ value_operand, g.TempImmediate(-sw.min_value));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
}
- // Generate a table lookup.
- return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
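
The guard above only decides whether the heuristic runs at all; the cost model itself is unchanged. As a rough illustration (a standalone sketch with a made-up name, not part of this patch), the same arithmetic shows a dense switch quickly favouring the jump table:

#include <cstddef>

// Same constants as above. Example: 10 cases covering values 0..11 gives
// table = (4 + 12) + 3 * 3 = 25 versus lookup = (3 + 2 * 10) + 3 * 10 = 53,
// so the table form wins once the guard (case_count > 4, bounded range,
// min_value > INT32_MIN) is satisfied.
bool PrefersTableSwitch(size_t case_count, size_t value_range) {
  size_t table_space_cost = 4 + value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * case_count;
  size_t lookup_time_cost = case_count;
  return case_count > 4 &&
         table_space_cost + 3 * table_time_cost <=
             lookup_space_cost + 3 * lookup_time_cost;
}
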
@@ -1448,7 +1435,7 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int32BinopMatcher m(node);
if (m.right().Is(0)) {
- return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
+ return VisitWordCompareZero(m.node(), m.left().node(), &cont);
}
VisitWordCompare(this, node, &cont);
}
@@ -1577,7 +1564,7 @@ void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
g.UseRegister(node->InputAt(0)));
}
-void InstructionSelector::VisitAtomicLoad(Node* node) {
+void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
load_rep.representation() == MachineRepresentation::kWord16 ||
@@ -1586,7 +1573,7 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
VisitLoad(node);
}
-void InstructionSelector::VisitAtomicStore(Node* node) {
+void InstructionSelector::VisitWord32AtomicStore(Node* node) {
IA32OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -1596,13 +1583,13 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kAtomicExchangeInt8;
+ opcode = kWord32AtomicExchangeInt8;
break;
case MachineRepresentation::kWord16:
- opcode = kAtomicExchangeInt16;
+ opcode = kWord32AtomicExchangeInt16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicExchangeWord32;
+ opcode = kWord32AtomicExchangeWord32;
break;
default:
UNREACHABLE();
@@ -1628,7 +1615,7 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
Emit(code, 0, nullptr, input_count, inputs);
}
-void InstructionSelector::VisitAtomicExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
IA32OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -1637,15 +1624,15 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
MachineType type = AtomicOpRepresentationOf(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
- opcode = kAtomicExchangeInt8;
+ opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicExchangeUint8;
+ opcode = kWord32AtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicExchangeInt16;
+ opcode = kWord32AtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicExchangeUint16;
+ opcode = kWord32AtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicExchangeWord32;
+ opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -1677,7 +1664,7 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
Emit(code, 1, outputs, input_count, inputs);
}
-void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
IA32OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -1687,15 +1674,15 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
MachineType type = AtomicOpRepresentationOf(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
- opcode = kAtomicCompareExchangeInt8;
+ opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicCompareExchangeUint8;
+ opcode = kWord32AtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicCompareExchangeInt16;
+ opcode = kWord32AtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicCompareExchangeUint16;
+ opcode = kWord32AtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicCompareExchangeWord32;
+ opcode = kWord32AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -1771,11 +1758,12 @@ void InstructionSelector::VisitAtomicBinaryOperation(
Emit(code, 1, outputs, input_count, inputs, 1, temp);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitAtomic##op(Node* node) { \
- VisitAtomicBinaryOperation(node, kAtomic##op##Int8, kAtomic##op##Uint8, \
- kAtomic##op##Int16, kAtomic##op##Uint16, \
- kAtomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitAtomicBinaryOperation( \
+ node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
+ kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
+ kWord32Atomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -1888,11 +1876,52 @@ void InstructionSelector::VisitF32x4ExtractLane(Node* node) {
}
}
+#define SIMD_I8X16_SHIFT_OPCODES(V) \
+ V(I8x16Shl) \
+ V(I8x16ShrS) \
+ V(I8x16ShrU)
+
+#define VISIT_SIMD_I8X16_SHIFT(Op) \
+ void InstructionSelector::Visit##Op(Node* node) { \
+ VisitRRISimd(this, node, kAVX##Op, kSSE##Op); \
+ }
+
+SIMD_I8X16_SHIFT_OPCODES(VISIT_SIMD_I8X16_SHIFT)
+#undef SIMD_I8X16_SHIFT_OPCODES
+#undef VISIT_SIMD_I8X16_SHIFT
+
+void InstructionSelector::VisitI8x16Mul(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
+ InstructionOperand operand1 = g.UseRegister(node->InputAt(1));
+ InstructionOperand temps[] = {g.TempSimd128Register(),
+ g.TempSimd128Register()};
+ if (IsSupported(AVX)) {
+ Emit(kAVXI8x16Mul, g.DefineAsRegister(node), operand0, operand1,
+ arraysize(temps), temps);
+ } else {
+ Emit(kSSEI8x16Mul, g.DefineSameAsFirst(node), operand0, operand1,
+ arraysize(temps), temps);
+ }
+}
+
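
x86 has no packed 8-bit multiply, so the backend presumably widens to 16-bit lanes and narrows the products back, which is why two SIMD temporaries are reserved above. Lane-wise the operation keeps only the low 8 bits of each product; a minimal scalar reference of that semantics (an illustration, not V8 code):

#include <cstdint>

// Reference semantics for i8x16 multiplication: per-lane product, truncated
// to 8 bits (i.e. wrapping modulo 256).
void I8x16MulReference(const uint8_t a[16], const uint8_t b[16],
                       uint8_t out[16]) {
  for (int i = 0; i < 16; ++i) {
    out[i] = static_cast<uint8_t>(a[i] * b[i]);
  }
}
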
void InstructionSelector::VisitS128Zero(Node* node) {
IA32OperandGenerator g(this);
Emit(kIA32S128Zero, g.DefineAsRegister(node));
}
+void InstructionSelector::VisitS128Select(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand operand2 = g.UseRegister(node->InputAt(2));
+ if (IsSupported(AVX)) {
+ Emit(kAVXS128Select, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
+ g.Use(node->InputAt(1)), operand2);
+ } else {
+ Emit(kSSES128Select, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ operand2);
+ }
+}
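
S128Select is a bitwise select: wherever a mask bit is set the result takes the bit from the first value input, otherwise from the second. The SSE path uses DefineSameAsFirst and register inputs presumably because the non-AVX sequence overwrites its first operand, while the AVX three-operand forms can write a fresh register. A scalar sketch of the semantics (illustration only):

#include <cstdint>

// Bitwise select of a over b under mask; the xor form on the right is the
// common two-temporary formulation.
uint32_t BitSelect(uint32_t mask, uint32_t a, uint32_t b) {
  return (mask & a) | (~mask & b);  // == ((a ^ b) & mask) ^ b
}
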
#define VISIT_SIMD_SPLAT(Type) \
void InstructionSelector::Visit##Type##Splat(Node* node) { \
@@ -1928,6 +1957,7 @@ SIMD_INT_TYPES(VISIT_SIMD_EXTRACT_LANE)
SIMD_INT_TYPES(VISIT_SIMD_REPLACE_LANE)
VISIT_SIMD_REPLACE_LANE(F32x4)
#undef VISIT_SIMD_REPLACE_LANE
+#undef SIMD_INT_TYPES
#define VISIT_SIMD_SHIFT(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
@@ -1942,6 +1972,7 @@ VISIT_SIMD_REPLACE_LANE(F32x4)
}
SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
#undef VISIT_SIMD_SHIFT
+#undef SIMD_SHIFT_OPCODES
#define VISIT_SIMD_INT_UNOP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
@@ -1950,6 +1981,7 @@ SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
}
SIMD_INT_UNOP_LIST(VISIT_SIMD_INT_UNOP)
#undef VISIT_SIMD_INT_UNOP
+#undef SIMD_INT_UNOP_LIST
#define VISIT_SIMD_OTHER_UNOP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
@@ -1959,6 +1991,7 @@ SIMD_INT_UNOP_LIST(VISIT_SIMD_INT_UNOP)
}
SIMD_OTHER_UNOP_LIST(VISIT_SIMD_OTHER_UNOP)
#undef VISIT_SIMD_OTHER_UNOP
+#undef SIMD_OTHER_UNOP_LIST
#define VISIT_SIMD_BINOP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
@@ -1966,6 +1999,7 @@ SIMD_OTHER_UNOP_LIST(VISIT_SIMD_OTHER_UNOP)
}
SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
#undef VISIT_SIMD_BINOP
+#undef SIMD_BINOP_LIST
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
@@ -2005,6 +2039,9 @@ InstructionSelector::AlignmentRequirements() {
FullUnalignedAccessSupport();
}
+// static
+bool InstructionSelector::SupportsSpeculationPoisoning() { return false; }
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/instruction-codes.h b/deps/v8/src/compiler/instruction-codes.h
index df3078d739..035833af0f 100644
--- a/deps/v8/src/compiler/instruction-codes.h
+++ b/deps/v8/src/compiler/instruction-codes.h
@@ -69,49 +69,49 @@ enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
V(ArchTruncateDoubleToI) \
V(ArchStoreWithWriteBarrier) \
V(ArchStackSlot) \
- V(AtomicLoadInt8) \
- V(AtomicLoadUint8) \
- V(AtomicLoadInt16) \
- V(AtomicLoadUint16) \
- V(AtomicLoadWord32) \
- V(AtomicStoreWord8) \
- V(AtomicStoreWord16) \
- V(AtomicStoreWord32) \
- V(AtomicExchangeInt8) \
- V(AtomicExchangeUint8) \
- V(AtomicExchangeInt16) \
- V(AtomicExchangeUint16) \
- V(AtomicExchangeWord32) \
- V(AtomicCompareExchangeInt8) \
- V(AtomicCompareExchangeUint8) \
- V(AtomicCompareExchangeInt16) \
- V(AtomicCompareExchangeUint16) \
- V(AtomicCompareExchangeWord32) \
- V(AtomicAddInt8) \
- V(AtomicAddUint8) \
- V(AtomicAddInt16) \
- V(AtomicAddUint16) \
- V(AtomicAddWord32) \
- V(AtomicSubInt8) \
- V(AtomicSubUint8) \
- V(AtomicSubInt16) \
- V(AtomicSubUint16) \
- V(AtomicSubWord32) \
- V(AtomicAndInt8) \
- V(AtomicAndUint8) \
- V(AtomicAndInt16) \
- V(AtomicAndUint16) \
- V(AtomicAndWord32) \
- V(AtomicOrInt8) \
- V(AtomicOrUint8) \
- V(AtomicOrInt16) \
- V(AtomicOrUint16) \
- V(AtomicOrWord32) \
- V(AtomicXorInt8) \
- V(AtomicXorUint8) \
- V(AtomicXorInt16) \
- V(AtomicXorUint16) \
- V(AtomicXorWord32) \
+ V(Word32AtomicLoadInt8) \
+ V(Word32AtomicLoadUint8) \
+ V(Word32AtomicLoadInt16) \
+ V(Word32AtomicLoadUint16) \
+ V(Word32AtomicLoadWord32) \
+ V(Word32AtomicStoreWord8) \
+ V(Word32AtomicStoreWord16) \
+ V(Word32AtomicStoreWord32) \
+ V(Word32AtomicExchangeInt8) \
+ V(Word32AtomicExchangeUint8) \
+ V(Word32AtomicExchangeInt16) \
+ V(Word32AtomicExchangeUint16) \
+ V(Word32AtomicExchangeWord32) \
+ V(Word32AtomicCompareExchangeInt8) \
+ V(Word32AtomicCompareExchangeUint8) \
+ V(Word32AtomicCompareExchangeInt16) \
+ V(Word32AtomicCompareExchangeUint16) \
+ V(Word32AtomicCompareExchangeWord32) \
+ V(Word32AtomicAddInt8) \
+ V(Word32AtomicAddUint8) \
+ V(Word32AtomicAddInt16) \
+ V(Word32AtomicAddUint16) \
+ V(Word32AtomicAddWord32) \
+ V(Word32AtomicSubInt8) \
+ V(Word32AtomicSubUint8) \
+ V(Word32AtomicSubInt16) \
+ V(Word32AtomicSubUint16) \
+ V(Word32AtomicSubWord32) \
+ V(Word32AtomicAndInt8) \
+ V(Word32AtomicAndUint8) \
+ V(Word32AtomicAndInt16) \
+ V(Word32AtomicAndUint16) \
+ V(Word32AtomicAndWord32) \
+ V(Word32AtomicOrInt8) \
+ V(Word32AtomicOrUint8) \
+ V(Word32AtomicOrInt16) \
+ V(Word32AtomicOrUint16) \
+ V(Word32AtomicOrWord32) \
+ V(Word32AtomicXorInt8) \
+ V(Word32AtomicXorUint8) \
+ V(Word32AtomicXorInt16) \
+ V(Word32AtomicXorUint16) \
+ V(Word32AtomicXorWord32) \
V(Ieee754Float64Acos) \
V(Ieee754Float64Acosh) \
V(Ieee754Float64Asin) \
@@ -174,9 +174,11 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
enum FlagsMode {
kFlags_none = 0,
kFlags_branch = 1,
- kFlags_deoptimize = 2,
- kFlags_set = 3,
- kFlags_trap = 4
+ kFlags_branch_and_poison = 2,
+ kFlags_deoptimize = 3,
+ kFlags_deoptimize_and_poison = 4,
+ kFlags_set = 5,
+ kFlags_trap = 6
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
@@ -219,6 +221,12 @@ FlagsCondition CommuteFlagsCondition(FlagsCondition condition);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
const FlagsCondition& fc);
+enum MemoryAccessMode {
+ kMemoryAccessDirect = 0,
+ kMemoryAccessProtected = 1,
+ kMemoryAccessPoisoned = 2
+};
+
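
The new kMemoryAccessPoisoned mode sits alongside direct accesses and the trap-handler-protected accesses used by WebAssembly bounds checks: a poisoned access has its loaded value combined with the speculation poison mask, so a value read on a mis-speculated path cannot feed further speculation. Conceptually (a sketch of the effect, not the emitted code):

#include <cstdint>

// On the architecturally taken path the mask is all ones and the load result
// passes through unchanged; after a mispredicted safety check the mask has
// been cleared, so the dependent value collapses to zero.
uint32_t PoisonedLoad(const uint32_t* p, uintptr_t poison_mask) {
  return *p & static_cast<uint32_t>(poison_mask);
}
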
// The InstructionCode is an opaque, target-specific integer that encodes
// what code to emit for an instruction in the code generator. It is not
// interesting to the register allocator, as the inputs and flags on the
diff --git a/deps/v8/src/compiler/instruction-scheduler.cc b/deps/v8/src/compiler/instruction-scheduler.cc
index f7afaab697..905ae4e6f0 100644
--- a/deps/v8/src/compiler/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/instruction-scheduler.cc
@@ -304,53 +304,53 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchStoreWithWriteBarrier:
return kHasSideEffect;
- case kAtomicLoadInt8:
- case kAtomicLoadUint8:
- case kAtomicLoadInt16:
- case kAtomicLoadUint16:
- case kAtomicLoadWord32:
+ case kWord32AtomicLoadInt8:
+ case kWord32AtomicLoadUint8:
+ case kWord32AtomicLoadInt16:
+ case kWord32AtomicLoadUint16:
+ case kWord32AtomicLoadWord32:
return kIsLoadOperation;
- case kAtomicStoreWord8:
- case kAtomicStoreWord16:
- case kAtomicStoreWord32:
+ case kWord32AtomicStoreWord8:
+ case kWord32AtomicStoreWord16:
+ case kWord32AtomicStoreWord32:
return kHasSideEffect;
- case kAtomicExchangeInt8:
- case kAtomicExchangeUint8:
- case kAtomicExchangeInt16:
- case kAtomicExchangeUint16:
- case kAtomicExchangeWord32:
- case kAtomicCompareExchangeInt8:
- case kAtomicCompareExchangeUint8:
- case kAtomicCompareExchangeInt16:
- case kAtomicCompareExchangeUint16:
- case kAtomicCompareExchangeWord32:
- case kAtomicAddInt8:
- case kAtomicAddUint8:
- case kAtomicAddInt16:
- case kAtomicAddUint16:
- case kAtomicAddWord32:
- case kAtomicSubInt8:
- case kAtomicSubUint8:
- case kAtomicSubInt16:
- case kAtomicSubUint16:
- case kAtomicSubWord32:
- case kAtomicAndInt8:
- case kAtomicAndUint8:
- case kAtomicAndInt16:
- case kAtomicAndUint16:
- case kAtomicAndWord32:
- case kAtomicOrInt8:
- case kAtomicOrUint8:
- case kAtomicOrInt16:
- case kAtomicOrUint16:
- case kAtomicOrWord32:
- case kAtomicXorInt8:
- case kAtomicXorUint8:
- case kAtomicXorInt16:
- case kAtomicXorUint16:
- case kAtomicXorWord32:
+ case kWord32AtomicExchangeInt8:
+ case kWord32AtomicExchangeUint8:
+ case kWord32AtomicExchangeInt16:
+ case kWord32AtomicExchangeUint16:
+ case kWord32AtomicExchangeWord32:
+ case kWord32AtomicCompareExchangeInt8:
+ case kWord32AtomicCompareExchangeUint8:
+ case kWord32AtomicCompareExchangeInt16:
+ case kWord32AtomicCompareExchangeUint16:
+ case kWord32AtomicCompareExchangeWord32:
+ case kWord32AtomicAddInt8:
+ case kWord32AtomicAddUint8:
+ case kWord32AtomicAddInt16:
+ case kWord32AtomicAddUint16:
+ case kWord32AtomicAddWord32:
+ case kWord32AtomicSubInt8:
+ case kWord32AtomicSubUint8:
+ case kWord32AtomicSubInt16:
+ case kWord32AtomicSubUint16:
+ case kWord32AtomicSubWord32:
+ case kWord32AtomicAndInt8:
+ case kWord32AtomicAndUint8:
+ case kWord32AtomicAndInt16:
+ case kWord32AtomicAndUint16:
+ case kWord32AtomicAndWord32:
+ case kWord32AtomicOrInt8:
+ case kWord32AtomicOrUint8:
+ case kWord32AtomicOrInt16:
+ case kWord32AtomicOrUint16:
+ case kWord32AtomicOrWord32:
+ case kWord32AtomicXorInt8:
+ case kWord32AtomicXorUint8:
+ case kWord32AtomicXorInt16:
+ case kWord32AtomicXorUint16:
+ case kWord32AtomicXorWord32:
return kHasSideEffect;
#define CASE(Name) case k##Name:
@@ -365,7 +365,8 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
bool InstructionScheduler::IsBlockTerminator(const Instruction* instr) const {
return ((GetInstructionFlags(instr) & kIsBlockTerminator) ||
- (instr->flags_mode() == kFlags_branch));
+ (instr->flags_mode() == kFlags_branch) ||
+ (instr->flags_mode() == kFlags_branch_and_poison));
}
diff --git a/deps/v8/src/compiler/instruction-selector-impl.h b/deps/v8/src/compiler/instruction-selector-impl.h
index 7c7a2708c5..56ccd9fc64 100644
--- a/deps/v8/src/compiler/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/instruction-selector-impl.h
@@ -206,6 +206,15 @@ class OperandGenerator {
return op;
}
+ InstructionOperand TempSimd128Register() {
+ UnallocatedOperand op = UnallocatedOperand(
+ UnallocatedOperand::MUST_HAVE_REGISTER,
+ UnallocatedOperand::USED_AT_START, sequence()->NextVirtualRegister());
+ sequence()->MarkAsRepresentation(MachineRepresentation::kSimd128,
+ op.virtual_register());
+ return op;
+ }
+
InstructionOperand TempRegister(Register reg) {
return UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER, reg.code(),
InstructionOperand::kInvalidVirtualRegister);
@@ -353,14 +362,21 @@ class FlagsContinuation final {
// Creates a new flags continuation from the given condition and true/false
// blocks.
- FlagsContinuation(FlagsCondition condition, BasicBlock* true_block,
- BasicBlock* false_block)
- : mode_(kFlags_branch),
- condition_(condition),
- true_block_(true_block),
- false_block_(false_block) {
- DCHECK_NOT_NULL(true_block);
- DCHECK_NOT_NULL(false_block);
+ static FlagsContinuation ForBranch(FlagsCondition condition,
+ BasicBlock* true_block,
+ BasicBlock* false_block,
+ LoadPoisoning masking) {
+ FlagsMode mode = masking == LoadPoisoning::kDoPoison
+ ? kFlags_branch_and_poison
+ : kFlags_branch;
+ return FlagsContinuation(mode, condition, true_block, false_block);
+ }
+
+ static FlagsContinuation ForBranchAndPoison(FlagsCondition condition,
+ BasicBlock* true_block,
+ BasicBlock* false_block) {
+ return FlagsContinuation(kFlags_branch_and_poison, condition, true_block,
+ false_block);
}
// Creates a new flags continuation for an eager deoptimization exit.
@@ -368,8 +384,13 @@ class FlagsContinuation final {
DeoptimizeKind kind,
DeoptimizeReason reason,
VectorSlotPair const& feedback,
- Node* frame_state) {
- return FlagsContinuation(condition, kind, reason, feedback, frame_state);
+ Node* frame_state,
+ LoadPoisoning masking) {
+ FlagsMode mode = masking == LoadPoisoning::kDoPoison
+ ? kFlags_deoptimize_and_poison
+ : kFlags_deoptimize;
+ return FlagsContinuation(mode, condition, kind, reason, feedback,
+ frame_state);
}
// Creates a new flags continuation for a boolean value.
@@ -384,8 +405,16 @@ class FlagsContinuation final {
}
bool IsNone() const { return mode_ == kFlags_none; }
- bool IsBranch() const { return mode_ == kFlags_branch; }
- bool IsDeoptimize() const { return mode_ == kFlags_deoptimize; }
+ bool IsBranch() const {
+ return mode_ == kFlags_branch || mode_ == kFlags_branch_and_poison;
+ }
+ bool IsDeoptimize() const {
+ return mode_ == kFlags_deoptimize || mode_ == kFlags_deoptimize_and_poison;
+ }
+ bool IsPoisoned() const {
+ return mode_ == kFlags_branch_and_poison ||
+ mode_ == kFlags_deoptimize_and_poison;
+ }
bool IsSet() const { return mode_ == kFlags_set; }
bool IsTrap() const { return mode_ == kFlags_trap; }
FlagsCondition condition() const {
@@ -473,17 +502,30 @@ class FlagsContinuation final {
}
private:
- FlagsContinuation(FlagsCondition condition, DeoptimizeKind kind,
- DeoptimizeReason reason, VectorSlotPair const& feedback,
- Node* frame_state)
- : mode_(kFlags_deoptimize),
+ FlagsContinuation(FlagsMode mode, FlagsCondition condition,
+ BasicBlock* true_block, BasicBlock* false_block)
+ : mode_(mode),
+ condition_(condition),
+ true_block_(true_block),
+ false_block_(false_block) {
+ DCHECK(mode == kFlags_branch || mode == kFlags_branch_and_poison);
+ DCHECK_NOT_NULL(true_block);
+ DCHECK_NOT_NULL(false_block);
+ }
+
+ FlagsContinuation(FlagsMode mode, FlagsCondition condition,
+ DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback, Node* frame_state)
+ : mode_(mode),
condition_(condition),
kind_(kind),
reason_(reason),
feedback_(feedback),
frame_state_or_result_(frame_state) {
+ DCHECK(mode == kFlags_deoptimize || mode == kFlags_deoptimize_and_poison);
DCHECK_NOT_NULL(frame_state);
}
+
FlagsContinuation(FlagsCondition condition, Node* result)
: mode_(kFlags_set),
condition_(condition),
@@ -502,13 +544,13 @@ class FlagsContinuation final {
FlagsMode const mode_;
FlagsCondition condition_;
- DeoptimizeKind kind_; // Only valid if mode_ == kFlags_deoptimize
- DeoptimizeReason reason_; // Only valid if mode_ == kFlags_deoptimize
- VectorSlotPair feedback_; // Only valid if mode_ == kFlags_deoptimize
- Node* frame_state_or_result_; // Only valid if mode_ == kFlags_deoptimize
+ DeoptimizeKind kind_; // Only valid if mode_ == kFlags_deoptimize*
+ DeoptimizeReason reason_; // Only valid if mode_ == kFlags_deoptimize*
+ VectorSlotPair feedback_; // Only valid if mode_ == kFlags_deoptimize*
+ Node* frame_state_or_result_; // Only valid if mode_ == kFlags_deoptimize*
// or mode_ == kFlags_set.
- BasicBlock* true_block_; // Only valid if mode_ == kFlags_branch.
- BasicBlock* false_block_; // Only valid if mode_ == kFlags_branch.
+ BasicBlock* true_block_; // Only valid if mode_ == kFlags_branch*.
+ BasicBlock* false_block_; // Only valid if mode_ == kFlags_branch*.
Runtime::FunctionId trap_id_; // Only valid if mode_ == kFlags_trap.
};
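
Callers now pick the poisoning variant through the factory methods rather than a separate constructor. A hypothetical helper mirroring how VisitBranch (further down in this patch) builds its continuation, shown only to illustrate the new ForBranch signature:

// Safety-check branches request the poisoning mode; ordinary branches do not.
FlagsContinuation BranchContinuation(BasicBlock* tbranch, BasicBlock* fbranch,
                                     bool is_safety_check) {
  LoadPoisoning masking =
      is_safety_check ? LoadPoisoning::kDoPoison : LoadPoisoning::kDontPoison;
  return FlagsContinuation::ForBranch(kNotEqual, tbranch, fbranch, masking);
}
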
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index c94b42b458..954a1fc272 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -24,9 +24,11 @@ InstructionSelector::InstructionSelector(
Zone* zone, size_t node_count, Linkage* linkage,
InstructionSequence* sequence, Schedule* schedule,
SourcePositionTable* source_positions, Frame* frame,
+ EnableSwitchJumpTable enable_switch_jump_table,
+ EnableSpeculationPoison enable_speculation_poison,
SourcePositionMode source_position_mode, Features features,
EnableScheduling enable_scheduling,
- EnableSerialization enable_serialization)
+ EnableSerialization enable_serialization, LoadPoisoning load_poisoning)
: zone_(zone),
linkage_(linkage),
sequence_(sequence),
@@ -45,6 +47,9 @@ InstructionSelector::InstructionSelector(
scheduler_(nullptr),
enable_scheduling_(enable_scheduling),
enable_serialization_(enable_serialization),
+ enable_switch_jump_table_(enable_switch_jump_table),
+ enable_speculation_poison_(enable_speculation_poison),
+ load_poisoning_(load_poisoning),
frame_(frame),
instruction_selection_failed_(false) {
instructions_.reserve(node_count);
@@ -651,16 +656,16 @@ size_t InstructionSelector::AddInputsToFrameStateDescriptor(
// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
struct CallBuffer {
- CallBuffer(Zone* zone, const CallDescriptor* descriptor,
+ CallBuffer(Zone* zone, const CallDescriptor* call_descriptor,
FrameStateDescriptor* frame_state)
- : descriptor(descriptor),
+ : descriptor(call_descriptor),
frame_state_descriptor(frame_state),
output_nodes(zone),
outputs(zone),
instruction_args(zone),
pushed_nodes(zone) {
- output_nodes.reserve(descriptor->ReturnCount());
- outputs.reserve(descriptor->ReturnCount());
+ output_nodes.reserve(call_descriptor->ReturnCount());
+ outputs.reserve(call_descriptor->ReturnCount());
pushed_nodes.reserve(input_count());
instruction_args.reserve(input_count() + frame_state_value_count());
}
@@ -758,19 +763,34 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
Node* callee = call->InputAt(0);
bool call_code_immediate = (flags & kCallCodeImmediate) != 0;
bool call_address_immediate = (flags & kCallAddressImmediate) != 0;
+ bool call_use_fixed_target_reg = (flags & kCallFixedTargetRegister) != 0;
switch (buffer->descriptor->kind()) {
case CallDescriptor::kCallCodeObject:
+ // TODO(jgruber, v8:7449): The below is a hack to support tail-calls from
+ // JS-linkage callers with a register code target. The problem is that the
+ // code target register may be clobbered before the final jmp by
+ // AssemblePopArgumentsAdaptorFrame. As a more permanent fix we could
+ // entirely remove support for tail-calls from JS-linkage callers.
buffer->instruction_args.push_back(
(call_code_immediate && callee->opcode() == IrOpcode::kHeapConstant)
? g.UseImmediate(callee)
- : g.UseRegister(callee));
+ : call_use_fixed_target_reg
+ ? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
+#ifdef V8_EMBEDDED_BUILTINS
+ : is_tail_call ? g.UseUniqueRegister(callee)
+ : g.UseRegister(callee));
+#else
+ : g.UseRegister(callee));
+#endif
break;
case CallDescriptor::kCallAddress:
buffer->instruction_args.push_back(
(call_address_immediate &&
callee->opcode() == IrOpcode::kExternalConstant)
? g.UseImmediate(callee)
- : g.UseRegister(callee));
+ : call_use_fixed_target_reg
+ ? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
+ : g.UseRegister(callee));
break;
case CallDescriptor::kCallWasmFunction:
buffer->instruction_args.push_back(
@@ -778,7 +798,9 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
(callee->opcode() == IrOpcode::kRelocatableInt64Constant ||
callee->opcode() == IrOpcode::kRelocatableInt32Constant))
? g.UseImmediate(callee)
- : g.UseRegister(callee));
+ : call_use_fixed_target_reg
+ ? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
+ : g.UseRegister(callee));
break;
case CallDescriptor::kCallJSFunction:
buffer->instruction_args.push_back(
@@ -1161,6 +1183,11 @@ void InstructionSelector::VisitNode(Node* node) {
MarkAsRepresentation(type.representation(), node);
return VisitLoad(node);
}
+ case IrOpcode::kPoisonedLoad: {
+ LoadRepresentation type = LoadRepresentationOf(node->op());
+ MarkAsRepresentation(type.representation(), node);
+ return VisitPoisonedLoad(node);
+ }
case IrOpcode::kStore:
return VisitStore(node);
case IrOpcode::kProtectedStore:
@@ -1470,6 +1497,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsFloat64(node), VisitFloat64InsertLowWord32(node);
case IrOpcode::kFloat64InsertHighWord32:
return MarkAsFloat64(node), VisitFloat64InsertHighWord32(node);
+ case IrOpcode::kSpeculationPoison:
+ return VisitSpeculationPoison(node);
case IrOpcode::kStackSlot:
return VisitStackSlot(node);
case IrOpcode::kLoadStackPointer:
@@ -1510,18 +1539,18 @@ void InstructionSelector::VisitNode(Node* node) {
MarkAsWord32(node);
MarkPairProjectionsAsWord32(node);
return VisitWord32PairSar(node);
- case IrOpcode::kAtomicLoad: {
+ case IrOpcode::kWord32AtomicLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
- return VisitAtomicLoad(node);
+ return VisitWord32AtomicLoad(node);
}
- case IrOpcode::kAtomicStore:
- return VisitAtomicStore(node);
+ case IrOpcode::kWord32AtomicStore:
+ return VisitWord32AtomicStore(node);
#define ATOMIC_CASE(name) \
- case IrOpcode::kAtomic##name: { \
+ case IrOpcode::kWord32Atomic##name: { \
MachineType type = AtomicOpRepresentationOf(node->op()); \
MarkAsRepresentation(type.representation(), node); \
- return VisitAtomic##name(node); \
+ return VisitWord32Atomic##name(node); \
}
ATOMIC_CASE(Exchange)
ATOMIC_CASE(CompareExchange)
@@ -1538,6 +1567,16 @@ void InstructionSelector::VisitNode(Node* node) {
MarkAsRepresentation(type.representation(), node);
return VisitProtectedLoad(node);
}
+ case IrOpcode::kSignExtendWord8ToInt32:
+ return MarkAsWord32(node), VisitSignExtendWord8ToInt32(node);
+ case IrOpcode::kSignExtendWord16ToInt32:
+ return MarkAsWord32(node), VisitSignExtendWord16ToInt32(node);
+ case IrOpcode::kSignExtendWord8ToInt64:
+ return MarkAsWord64(node), VisitSignExtendWord8ToInt64(node);
+ case IrOpcode::kSignExtendWord16ToInt64:
+ return MarkAsWord64(node), VisitSignExtendWord16ToInt64(node);
+ case IrOpcode::kSignExtendWord32ToInt64:
+ return MarkAsWord64(node), VisitSignExtendWord32ToInt64(node);
case IrOpcode::kUnsafePointerAdd:
MarkAsRepresentation(MachineType::PointerRepresentation(), node);
return VisitUnsafePointerAdd(node);
@@ -1774,12 +1813,20 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kS1x16AllTrue:
return MarkAsWord32(node), VisitS1x16AllTrue(node);
default:
- V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
- node->opcode(), node->op()->mnemonic(), node->id());
+ FATAL("Unexpected operator #%d:%s @ node #%d", node->opcode(),
+ node->op()->mnemonic(), node->id());
break;
}
}
+void InstructionSelector::VisitSpeculationPoison(Node* node) {
+ CHECK(enable_speculation_poison_ == kEnableSpeculationPoison);
+ OperandGenerator g(this);
+ Emit(kArchNop, g.DefineAsLocation(node, LinkageLocation::ForRegister(
+ kSpeculationPoisonRegister.code(),
+ MachineType::UintPtr())));
+}
+
void InstructionSelector::VisitLoadStackPointer(Node* node) {
OperandGenerator g(this);
Emit(kArchStackPointer, g.DefineAsRegister(node));
@@ -2078,6 +2125,18 @@ void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
UNIMPLEMENTED();
}
+
+void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
+ UNIMPLEMENTED();
+}
#endif // V8_TARGET_ARCH_32_BIT
// 64 bit targets do not implement the following instructions.
@@ -2108,36 +2167,14 @@ void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
// && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
-void InstructionSelector::VisitF32x4Abs(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Neg(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
- // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
-
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_X64
+void InstructionSelector::VisitF32x4RecipApprox(Node* node) { UNIMPLEMENTED(); }
+
void InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
- // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_X64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitF32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
- // && !V8_TARGET_ARCH_MIPS64
-
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_X64
-void InstructionSelector::VisitF32x4RecipApprox(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
- // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_X64
-
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
// && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
@@ -2154,6 +2191,12 @@ void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
// && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_X64
+void InstructionSelector::VisitI32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_X64
+
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI32x4SConvertI16x8Low(Node* node) {
UNIMPLEMENTED();
@@ -2204,55 +2247,33 @@ void InstructionSelector::VisitI16x8UConvertI8x16Low(Node* node) {
void InstructionSelector::VisitI16x8UConvertI8x16High(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
- // && !V8_TARGET_ARCH_MIPS64
-
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI8x16Shl(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16ShrS(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
- // && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI8x16SConvertI16x8(Node* node) {
UNIMPLEMENTED();
}
+
+void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
+ UNIMPLEMENTED();
+}
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
// && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI8x16Mul(Node* node) { UNIMPLEMENTED(); }
+ !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
+void InstructionSelector::VisitI8x16Shl(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16ShrS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16ShrU(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
- // && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
- UNIMPLEMENTED();
-}
+void InstructionSelector::VisitI8x16Mul(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
- // && !V8_TARGET_ARCH_MIPS64
-
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitS128Select(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+ // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitS8x16Shuffle(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
- // && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitS1x4AnyTrue(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS1x4AllTrue(Node* node) { UNIMPLEMENTED(); }
@@ -2365,15 +2386,15 @@ void InstructionSelector::VisitConstant(Node* node) {
void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
OperandGenerator g(this);
- const CallDescriptor* descriptor = CallDescriptorOf(node->op());
+ auto call_descriptor = CallDescriptorOf(node->op());
FrameStateDescriptor* frame_state_descriptor = nullptr;
- if (descriptor->NeedsFrameState()) {
+ if (call_descriptor->NeedsFrameState()) {
frame_state_descriptor = GetFrameStateDescriptor(
- node->InputAt(static_cast<int>(descriptor->InputCount())));
+ node->InputAt(static_cast<int>(call_descriptor->InputCount())));
}
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+ CallBuffer buffer(zone(), call_descriptor, frame_state_descriptor);
// Compute InstructionOperands for inputs and outputs.
// TODO(turbofan): on some architectures it's probably better to use
@@ -2383,10 +2404,10 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
CallBufferFlags call_buffer_flags(kCallCodeImmediate | kCallAddressImmediate);
InitializeCallBuffer(node, &buffer, call_buffer_flags, false);
- EmitPrepareArguments(&(buffer.pushed_nodes), descriptor, node);
+ EmitPrepareArguments(&(buffer.pushed_nodes), call_descriptor, node);
// Pass label of exception handler block.
- CallDescriptor::Flags flags = descriptor->flags();
+ CallDescriptor::Flags flags = call_descriptor->flags();
if (handler) {
DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
flags |= CallDescriptor::kHasExceptionHandler;
@@ -2395,11 +2416,10 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Select the appropriate opcode based on the call type.
InstructionCode opcode = kArchNop;
- switch (descriptor->kind()) {
+ switch (call_descriptor->kind()) {
case CallDescriptor::kCallAddress:
- opcode =
- kArchCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->ParameterCount()));
+ opcode = kArchCallCFunction | MiscField::encode(static_cast<int>(
+ call_descriptor->ParameterCount()));
break;
case CallDescriptor::kCallCodeObject:
opcode = kArchCallCodeObject | MiscField::encode(flags);
@@ -2421,7 +2441,7 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
if (instruction_selection_failed()) return;
call_instr->MarkAsCall();
- EmitPrepareResults(&(buffer.output_nodes), descriptor, node);
+ EmitPrepareResults(&(buffer.output_nodes), call_descriptor, node);
}
void InstructionSelector::VisitCallWithCallerSavedRegisters(
@@ -2438,26 +2458,29 @@ void InstructionSelector::VisitCallWithCallerSavedRegisters(
void InstructionSelector::VisitTailCall(Node* node) {
OperandGenerator g(this);
- CallDescriptor const* descriptor = CallDescriptorOf(node->op());
+ auto call_descriptor = CallDescriptorOf(node->op());
CallDescriptor* caller = linkage()->GetIncomingDescriptor();
DCHECK(caller->CanTailCall(node));
const CallDescriptor* callee = CallDescriptorOf(node->op());
int stack_param_delta = callee->GetStackParameterDelta(caller);
- CallBuffer buffer(zone(), descriptor, nullptr);
+ CallBuffer buffer(zone(), call_descriptor, nullptr);
// Compute InstructionOperands for inputs and outputs.
CallBufferFlags flags(kCallCodeImmediate | kCallTail);
if (IsTailCallAddressImmediate()) {
flags |= kCallAddressImmediate;
}
+ if (callee->flags() & CallDescriptor::kFixedTargetRegister) {
+ flags |= kCallFixedTargetRegister;
+ }
InitializeCallBuffer(node, &buffer, flags, true, stack_param_delta);
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
InstructionOperandVector temps(zone());
if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
- switch (descriptor->kind()) {
+ switch (call_descriptor->kind()) {
case CallDescriptor::kCallCodeObject:
opcode = kArchTailCallCodeObjectFromJSFunction;
break;
@@ -2470,7 +2493,7 @@ void InstructionSelector::VisitTailCall(Node* node) {
temps.push_back(g.TempRegister());
}
} else {
- switch (descriptor->kind()) {
+ switch (call_descriptor->kind()) {
case CallDescriptor::kCallCodeObject:
opcode = kArchTailCallCodeObject;
break;
@@ -2485,7 +2508,7 @@ void InstructionSelector::VisitTailCall(Node* node) {
return;
}
}
- opcode |= MiscField::encode(descriptor->flags());
+ opcode |= MiscField::encode(call_descriptor->flags());
Emit(kArchPrepareTailCall, g.NoOutput());
@@ -2534,6 +2557,51 @@ void InstructionSelector::VisitReturn(Node* ret) {
Emit(kArchRet, 0, nullptr, input_count, value_locations);
}
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+ BasicBlock* fbranch) {
+ LoadPoisoning poisoning =
+ IsSafetyCheckOf(branch->op()) == IsSafetyCheck::kSafetyCheck
+ ? load_poisoning_
+ : LoadPoisoning::kDontPoison;
+ FlagsContinuation cont =
+ FlagsContinuation::ForBranch(kNotEqual, tbranch, fbranch, poisoning);
+ VisitWordCompareZero(branch, branch->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+ DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
+ LoadPoisoning poisoning = p.is_safety_check() == IsSafetyCheck::kSafetyCheck
+ ? load_poisoning_
+ : LoadPoisoning::kDontPoison;
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1),
+ poisoning);
+ VisitWordCompareZero(node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+ DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
+ LoadPoisoning poisoning = p.is_safety_check() == IsSafetyCheck::kSafetyCheck
+ ? load_poisoning_
+ : LoadPoisoning::kDontPoison;
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1), poisoning);
+ VisitWordCompareZero(node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+ Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(node, node->InputAt(0), &cont);
+}
+
Instruction* InstructionSelector::EmitDeoptimize(
InstructionCode opcode, InstructionOperand output, InstructionOperand a,
DeoptimizeKind kind, DeoptimizeReason reason,
diff --git a/deps/v8/src/compiler/instruction-selector.h b/deps/v8/src/compiler/instruction-selector.h
index 75c41c165f..e30dba0aa0 100644
--- a/deps/v8/src/compiler/instruction-selector.h
+++ b/deps/v8/src/compiler/instruction-selector.h
@@ -51,17 +51,28 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
enum SourcePositionMode { kCallSourcePositions, kAllSourcePositions };
enum EnableScheduling { kDisableScheduling, kEnableScheduling };
enum EnableSerialization { kDisableSerialization, kEnableSerialization };
+ enum EnableSwitchJumpTable {
+ kDisableSwitchJumpTable,
+ kEnableSwitchJumpTable
+ };
+ enum EnableSpeculationPoison {
+ kDisableSpeculationPoison,
+ kEnableSpeculationPoison
+ };
InstructionSelector(
Zone* zone, size_t node_count, Linkage* linkage,
InstructionSequence* sequence, Schedule* schedule,
SourcePositionTable* source_positions, Frame* frame,
+ EnableSwitchJumpTable enable_switch_jump_table,
+ EnableSpeculationPoison enable_speculation_poison,
SourcePositionMode source_position_mode = kCallSourcePositions,
Features features = SupportedFeatures(),
EnableScheduling enable_scheduling = FLAG_turbo_instruction_scheduling
? kEnableScheduling
: kDisableScheduling,
- EnableSerialization enable_serialization = kDisableSerialization);
+ EnableSerialization enable_serialization = kDisableSerialization,
+ LoadPoisoning poisoning = LoadPoisoning::kDontPoison);
// Visit code for the entire graph with the included schedule.
bool SelectInstructions();
@@ -158,6 +169,9 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
static MachineOperatorBuilder::AlignmentRequirements AlignmentRequirements();
+ // TODO(jarin) This is temporary until the poisoning is universally supported.
+ static bool SupportsSpeculationPoisoning();
+
// ===========================================================================
// ============ Architecture-independent graph covering methods. =============
// ===========================================================================
@@ -277,7 +291,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
enum CallBufferFlag {
kCallCodeImmediate = 1u << 0,
kCallAddressImmediate = 1u << 1,
- kCallTail = 1u << 2
+ kCallTail = 1u << 2,
+ kCallFixedTargetRegister = 1u << 3,
};
typedef base::Flags<CallBufferFlag> CallBufferFlags;
@@ -354,10 +369,12 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void VisitUnreachable(Node* node);
void VisitDeadValue(Node* node);
+ void VisitWordCompareZero(Node* user, Node* value, FlagsContinuation* cont);
+
void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
- const CallDescriptor* descriptor, Node* node);
+ const CallDescriptor* call_descriptor, Node* node);
void EmitPrepareResults(ZoneVector<compiler::PushParameter>* results,
- const CallDescriptor* descriptor, Node* node);
+ const CallDescriptor* call_descriptor, Node* node);
void EmitIdentity(Node* node);
bool CanProduceSignalingNaN(Node* node);
@@ -445,6 +462,9 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
InstructionScheduler* scheduler_;
EnableScheduling enable_scheduling_;
EnableSerialization enable_serialization_;
+ EnableSwitchJumpTable enable_switch_jump_table_;
+ EnableSpeculationPoison enable_speculation_poison_;
+ LoadPoisoning load_poisoning_;
Frame* frame_;
bool instruction_selection_failed_;
};
diff --git a/deps/v8/src/compiler/instruction.cc b/deps/v8/src/compiler/instruction.cc
index f335177b95..85d4533d25 100644
--- a/deps/v8/src/compiler/instruction.cc
+++ b/deps/v8/src/compiler/instruction.cc
@@ -96,6 +96,26 @@ bool InstructionOperand::InterferesWith(const InstructionOperand& other) const {
return false;
}
+bool LocationOperand::IsCompatible(LocationOperand* op) {
+ if (IsRegister() || IsStackSlot()) {
+ return op->IsRegister() || op->IsStackSlot();
+ } else if (kSimpleFPAliasing) {
+ // A backend may choose to generate the same instruction sequence regardless
+ // of the FP representation. As a result, we can relax the compatibility and
+ // allow a Double to be moved in a Float for example. However, this is only
+ // allowed if registers do not overlap.
+ return (IsFPRegister() || IsFPStackSlot()) &&
+ (op->IsFPRegister() || op->IsFPStackSlot());
+ } else if (IsFloatRegister() || IsFloatStackSlot()) {
+ return op->IsFloatRegister() || op->IsFloatStackSlot();
+ } else if (IsDoubleRegister() || IsDoubleStackSlot()) {
+ return op->IsDoubleRegister() || op->IsDoubleStackSlot();
+ } else {
+ return (IsSimd128Register() || IsSimd128StackSlot()) &&
+ (op->IsSimd128Register() || op->IsSimd128StackSlot());
+ }
+}
+
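
IsCompatible asks whether a move between the two locations can preserve the bit pattern: general-purpose registers and stack slots mix with each other, and under simple FP aliasing any FP register or FP stack slot may hold any FP representation, provided registers do not overlap. A simplified restatement over a made-up LocationKind enum (not V8's types, just the shape of the rule):

enum class LocationKind { kGpRegister, kStackSlot, kFpRegister, kFpStackSlot };

// With simple FP aliasing: general-purpose locations are compatible with each
// other, FP locations are compatible with each other, never across the two.
bool CompatibleLocations(LocationKind a, LocationKind b) {
  bool a_fp = a == LocationKind::kFpRegister || a == LocationKind::kFpStackSlot;
  bool b_fp = b == LocationKind::kFpRegister || b == LocationKind::kFpStackSlot;
  return a_fp == b_fp;
}
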
void InstructionOperand::Print(const RegisterConfiguration* config) const {
OFStream os(stdout);
PrintableInstructionOperand wrapper;
@@ -426,8 +446,12 @@ std::ostream& operator<<(std::ostream& os, const FlagsMode& fm) {
return os;
case kFlags_branch:
return os << "branch";
+ case kFlags_branch_and_poison:
+ return os << "branch_and_poison";
case kFlags_deoptimize:
return os << "deoptimize";
+ case kFlags_deoptimize_and_poison:
+ return os << "deoptimize_and_poison";
case kFlags_set:
return os << "set";
case kFlags_trap:
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index 7772f18ad9..11da39aacb 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -491,6 +491,9 @@ class LocationOperand : public InstructionOperand {
UNREACHABLE();
}
+ // Return true if the locations can be moved to one another.
+ bool IsCompatible(LocationOperand* op);
+
static LocationOperand* cast(InstructionOperand* op) {
DCHECK(op->IsAnyLocationOperand());
return static_cast<LocationOperand*>(op);
@@ -889,7 +892,8 @@ class V8_EXPORT_PRIVATE Instruction final {
bool IsDeoptimizeCall() const {
return arch_opcode() == ArchOpcode::kArchDeoptimize ||
- FlagsModeField::decode(opcode()) == kFlags_deoptimize;
+ FlagsModeField::decode(opcode()) == kFlags_deoptimize ||
+ FlagsModeField::decode(opcode()) == kFlags_deoptimize_and_poison;
}
bool IsTrap() const {
@@ -1093,11 +1097,7 @@ class V8_EXPORT_PRIVATE Constant final {
private:
Type type_;
-#if V8_TARGET_ARCH_32_BIT
- RelocInfo::Mode rmode_ = RelocInfo::NONE32;
-#else
- RelocInfo::Mode rmode_ = RelocInfo::NONE64;
-#endif
+ RelocInfo::Mode rmode_ = RelocInfo::NONE;
int64_t value_;
};
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index 940f0904b3..ca1bf399b0 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -75,11 +75,11 @@ void Int64Lowering::LowerGraph() {
namespace {
-int GetReturnIndexAfterLowering(
- CallDescriptor* descriptor, int old_index) {
+int GetReturnIndexAfterLowering(CallDescriptor* call_descriptor,
+ int old_index) {
int result = old_index;
for (int i = 0; i < old_index; i++) {
- if (descriptor->GetReturnType(i).representation() ==
+ if (call_descriptor->GetReturnType(i).representation() ==
MachineRepresentation::kWord64) {
result++;
}
@@ -87,9 +87,9 @@ int GetReturnIndexAfterLowering(
return result;
}
-int GetReturnCountAfterLowering(CallDescriptor* descriptor) {
+int GetReturnCountAfterLowering(CallDescriptor* call_descriptor) {
return GetReturnIndexAfterLowering(
- descriptor, static_cast<int>(descriptor->ReturnCount()));
+ call_descriptor, static_cast<int>(call_descriptor->ReturnCount()));
}
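
On 32-bit targets every word64 return is split into two word32 returns, so a return's index shifts by one for each preceding word64 return. For example, a signature returning (i64, i32, i64) lowers to five i32 returns; old index 1 maps to new index 2 and old index 2 maps to new index 3, which is what GetReturnIndexAfterLowering computes and what GetReturnCountAfterLowering uses with old_index == ReturnCount(). The same computation as a self-contained sketch (illustrative types only):

#include <vector>

// Adds one extra slot for every 64-bit return preceding old_index.
int ReturnIndexAfterLowering(const std::vector<bool>& is_word64,
                             int old_index) {
  int result = old_index;
  for (int i = 0; i < old_index; i++) {
    if (is_word64[i]) result++;
  }
  return result;
}
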
int GetParameterIndexAfterLowering(
@@ -314,32 +314,32 @@ void Int64Lowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kTailCall: {
- CallDescriptor* descriptor =
+ auto call_descriptor =
const_cast<CallDescriptor*>(CallDescriptorOf(node->op()));
bool returns_require_lowering =
- GetReturnCountAfterLowering(descriptor) !=
- static_cast<int>(descriptor->ReturnCount());
+ GetReturnCountAfterLowering(call_descriptor) !=
+ static_cast<int>(call_descriptor->ReturnCount());
if (DefaultLowering(node) || returns_require_lowering) {
// Tail calls do not have return values, so adjusting the call
// descriptor is enough.
- auto new_descriptor = GetI32WasmCallDescriptor(zone(), descriptor);
+ auto new_descriptor = GetI32WasmCallDescriptor(zone(), call_descriptor);
NodeProperties::ChangeOp(node, common()->TailCall(new_descriptor));
}
break;
}
case IrOpcode::kCall: {
- CallDescriptor* descriptor =
+ auto call_descriptor =
const_cast<CallDescriptor*>(CallDescriptorOf(node->op()));
bool returns_require_lowering =
- GetReturnCountAfterLowering(descriptor) !=
- static_cast<int>(descriptor->ReturnCount());
+ GetReturnCountAfterLowering(call_descriptor) !=
+ static_cast<int>(call_descriptor->ReturnCount());
if (DefaultLowering(node) || returns_require_lowering) {
// We have to adjust the call descriptor.
- NodeProperties::ChangeOp(
- node, common()->Call(GetI32WasmCallDescriptor(zone(), descriptor)));
+ NodeProperties::ChangeOp(node, common()->Call(GetI32WasmCallDescriptor(
+ zone(), call_descriptor)));
}
if (returns_require_lowering) {
- size_t return_arity = descriptor->ReturnCount();
+ size_t return_arity = call_descriptor->ReturnCount();
if (return_arity == 1) {
// We access the additional return values through projections.
Node* low_node =
@@ -355,14 +355,14 @@ void Int64Lowering::LowerNode(Node* node) {
++old_index, ++new_index) {
Node* use_node = projections[old_index];
DCHECK_EQ(ProjectionIndexOf(use_node->op()), old_index);
- DCHECK_EQ(GetReturnIndexAfterLowering(descriptor,
+ DCHECK_EQ(GetReturnIndexAfterLowering(call_descriptor,
static_cast<int>(old_index)),
static_cast<int>(new_index));
if (new_index != old_index) {
NodeProperties::ChangeOp(
use_node, common()->Projection(new_index));
}
- if (descriptor->GetReturnType(old_index).representation() ==
+ if (call_descriptor->GetReturnType(old_index).representation() ==
MachineRepresentation::kWord64) {
Node* high_node = graph()->NewNode(
common()->Projection(new_index + 1), node,
diff --git a/deps/v8/src/compiler/js-builtin-reducer.cc b/deps/v8/src/compiler/js-builtin-reducer.cc
index 7ff2bf6d5e..a6d98586ad 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.cc
+++ b/deps/v8/src/compiler/js-builtin-reducer.cc
@@ -39,6 +39,14 @@ class JSCallReduction {
return function->shared()->HasBuiltinFunctionId();
}
+ bool BuiltinCanBeInlined() {
+ DCHECK_EQ(IrOpcode::kJSCall, node_->opcode());
+ HeapObjectMatcher m(NodeProperties::GetValueInput(node_, 0));
+ Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
+ // Do not inline if the builtin may have break points.
+ return !function->shared()->HasBreakInfo();
+ }
+
// Retrieves the BuiltinFunctionId as described above.
BuiltinFunctionId GetBuiltinFunctionId() {
DCHECK_EQ(IrOpcode::kJSCall, node_->opcode());
@@ -245,7 +253,7 @@ Reduction JSBuiltinReducer::ReduceArrayIterator(Handle<Map> receiver_map,
map_index = Context::TYPED_ARRAY_KEY_ITERATOR_MAP_INDEX;
} else {
DCHECK_GE(receiver_map->elements_kind(), UINT8_ELEMENTS);
- DCHECK_LE(receiver_map->elements_kind(), UINT8_CLAMPED_ELEMENTS);
+ DCHECK_LE(receiver_map->elements_kind(), BIGINT64_ELEMENTS);
map_index = (kind == IterationKind::kValues
? Context::UINT8_ARRAY_VALUE_ITERATOR_MAP_INDEX
: Context::UINT8_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX) +
@@ -864,30 +872,6 @@ Reduction JSBuiltinReducer::ReduceArrayIsArray(Node* node) {
return Replace(value);
}
-namespace {
-
-bool HasInstanceTypeWitness(Node* receiver, Node* effect,
- InstanceType instance_type) {
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
- switch (result) {
- case NodeProperties::kUnreliableReceiverMaps:
- case NodeProperties::kReliableReceiverMaps:
- DCHECK_NE(0, receiver_maps.size());
- for (size_t i = 0; i < receiver_maps.size(); ++i) {
- if (receiver_maps[i]->instance_type() != instance_type) return false;
- }
- return true;
-
- case NodeProperties::kNoReceiverMaps:
- return false;
- }
- UNREACHABLE();
-}
-
-} // namespace
-
Reduction JSBuiltinReducer::ReduceCollectionIterator(
Node* node, InstanceType collection_instance_type,
int collection_iterator_map_index) {
@@ -895,7 +879,8 @@ Reduction JSBuiltinReducer::ReduceCollectionIterator(
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (HasInstanceTypeWitness(receiver, effect, collection_instance_type)) {
+ if (NodeProperties::HasInstanceTypeWitness(receiver, effect,
+ collection_instance_type)) {
// Figure out the proper collection iterator map.
Handle<Map> collection_iterator_map(
Map::cast(native_context()->get(collection_iterator_map_index)),
@@ -930,7 +915,8 @@ Reduction JSBuiltinReducer::ReduceCollectionSize(
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (HasInstanceTypeWitness(receiver, effect, collection_instance_type)) {
+ if (NodeProperties::HasInstanceTypeWitness(receiver, effect,
+ collection_instance_type)) {
Node* table = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSCollectionTable()),
receiver, effect, control);
@@ -1021,12 +1007,13 @@ Reduction JSBuiltinReducer::ReduceCollectionIteratorNext(
receiver, effect, control);
Callable const callable =
Builtins::CallableFor(isolate(), Builtins::kOrderedHashTableHealIndex);
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNoFlags, Operator::kEliminatable);
- index = effect = graph()->NewNode(
- common()->Call(desc), jsgraph()->HeapConstant(callable.code()), table,
- index, jsgraph()->NoContextConstant(), effect);
+ index = effect =
+ graph()->NewNode(common()->Call(call_descriptor),
+ jsgraph()->HeapConstant(callable.code()), table, index,
+ jsgraph()->NoContextConstant(), effect);
NodeProperties::SetType(index, type_cache_.kFixedArrayLengthType);
// Update the {index} and {table} on the {receiver}.
@@ -1235,7 +1222,7 @@ Reduction JSBuiltinReducer::ReduceDateGetTime(Node* node) {
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (HasInstanceTypeWitness(receiver, effect, JS_DATE_TYPE)) {
+ if (NodeProperties::HasInstanceTypeWitness(receiver, effect, JS_DATE_TYPE)) {
Node* value = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSDateValue()), receiver,
effect, control);
@@ -1281,7 +1268,8 @@ Reduction JSBuiltinReducer::ReduceMapGet(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
Node* key = NodeProperties::GetValueInput(node, 2);
- if (!HasInstanceTypeWitness(receiver, effect, JS_MAP_TYPE)) return NoChange();
+ if (!NodeProperties::HasInstanceTypeWitness(receiver, effect, JS_MAP_TYPE))
+ return NoChange();
Node* table = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSCollectionTable()), receiver,
@@ -1324,7 +1312,8 @@ Reduction JSBuiltinReducer::ReduceMapHas(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
Node* key = NodeProperties::GetValueInput(node, 2);
- if (!HasInstanceTypeWitness(receiver, effect, JS_MAP_TYPE)) return NoChange();
+ if (!NodeProperties::HasInstanceTypeWitness(receiver, effect, JS_MAP_TYPE))
+ return NoChange();
Node* table = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSCollectionTable()), receiver,
@@ -1341,420 +1330,6 @@ Reduction JSBuiltinReducer::ReduceMapHas(Node* node) {
return Replace(value);
}
-// ES6 section 20.2.2.1 Math.abs ( x )
-Reduction JSBuiltinReducer::ReduceMathAbs(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.abs(a:plain-primitive) -> NumberAbs(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberAbs(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.2 Math.acos ( x )
-Reduction JSBuiltinReducer::ReduceMathAcos(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.acos(a:plain-primitive) -> NumberAcos(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberAcos(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.3 Math.acosh ( x )
-Reduction JSBuiltinReducer::ReduceMathAcosh(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.acosh(a:plain-primitive) -> NumberAcosh(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberAcosh(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.4 Math.asin ( x )
-Reduction JSBuiltinReducer::ReduceMathAsin(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.asin(a:plain-primitive) -> NumberAsin(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberAsin(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.5 Math.asinh ( x )
-Reduction JSBuiltinReducer::ReduceMathAsinh(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.asinh(a:plain-primitive) -> NumberAsinh(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberAsinh(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.6 Math.atan ( x )
-Reduction JSBuiltinReducer::ReduceMathAtan(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.atan(a:plain-primitive) -> NumberAtan(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberAtan(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.7 Math.atanh ( x )
-Reduction JSBuiltinReducer::ReduceMathAtanh(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.atanh(a:plain-primitive) -> NumberAtanh(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberAtanh(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.8 Math.atan2 ( y, x )
-Reduction JSBuiltinReducer::ReduceMathAtan2(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchTwo(Type::PlainPrimitive(), Type::PlainPrimitive())) {
- // Math.atan2(a:plain-primitive,
- // b:plain-primitive) -> NumberAtan2(ToNumber(a),
- // ToNumber(b))
- Node* left = ToNumber(r.left());
- Node* right = ToNumber(r.right());
- Node* value = graph()->NewNode(simplified()->NumberAtan2(), left, right);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.10 Math.ceil ( x )
-Reduction JSBuiltinReducer::ReduceMathCeil(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.ceil(a:plain-primitive) -> NumberCeil(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberCeil(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.11 Math.clz32 ( x )
-Reduction JSBuiltinReducer::ReduceMathClz32(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.clz32(a:plain-primitive) -> NumberClz32(ToUint32(a))
- Node* input = ToUint32(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberClz32(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.12 Math.cos ( x )
-Reduction JSBuiltinReducer::ReduceMathCos(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.cos(a:plain-primitive) -> NumberCos(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberCos(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.13 Math.cosh ( x )
-Reduction JSBuiltinReducer::ReduceMathCosh(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.cosh(a:plain-primitive) -> NumberCosh(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberCosh(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.14 Math.exp ( x )
-Reduction JSBuiltinReducer::ReduceMathExp(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.exp(a:plain-primitive) -> NumberExp(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberExp(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.15 Math.expm1 ( x )
-Reduction JSBuiltinReducer::ReduceMathExpm1(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Number())) {
- // Math.expm1(a:number) -> NumberExpm1(a)
- Node* value = graph()->NewNode(simplified()->NumberExpm1(), r.left());
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.16 Math.floor ( x )
-Reduction JSBuiltinReducer::ReduceMathFloor(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.floor(a:plain-primitive) -> NumberFloor(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberFloor(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.17 Math.fround ( x )
-Reduction JSBuiltinReducer::ReduceMathFround(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.fround(a:plain-primitive) -> NumberFround(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberFround(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.19 Math.imul ( x, y )
-Reduction JSBuiltinReducer::ReduceMathImul(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchTwo(Type::PlainPrimitive(), Type::PlainPrimitive())) {
- // Math.imul(a:plain-primitive,
- // b:plain-primitive) -> NumberImul(ToUint32(a),
- // ToUint32(b))
- Node* left = ToUint32(r.left());
- Node* right = ToUint32(r.right());
- Node* value = graph()->NewNode(simplified()->NumberImul(), left, right);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.20 Math.log ( x )
-Reduction JSBuiltinReducer::ReduceMathLog(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.log(a:plain-primitive) -> NumberLog(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberLog(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.21 Math.log1p ( x )
-Reduction JSBuiltinReducer::ReduceMathLog1p(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.log1p(a:plain-primitive) -> NumberLog1p(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberLog1p(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.22 Math.log10 ( x )
-Reduction JSBuiltinReducer::ReduceMathLog10(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Number())) {
- // Math.log10(a:number) -> NumberLog10(a)
- Node* value = graph()->NewNode(simplified()->NumberLog10(), r.left());
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.23 Math.log2 ( x )
-Reduction JSBuiltinReducer::ReduceMathLog2(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Number())) {
- // Math.log2(a:number) -> NumberLog(a)
- Node* value = graph()->NewNode(simplified()->NumberLog2(), r.left());
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.24 Math.max ( value1, value2, ...values )
-Reduction JSBuiltinReducer::ReduceMathMax(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchZero()) {
- // Math.max() -> -Infinity
- return Replace(jsgraph()->Constant(-V8_INFINITY));
- }
- if (r.InputsMatchAll(Type::PlainPrimitive())) {
- // Math.max(a:plain-primitive, b:plain-primitive, ...)
- Node* value = ToNumber(r.GetJSCallInput(0));
- for (int i = 1; i < r.GetJSCallArity(); i++) {
- Node* input = ToNumber(r.GetJSCallInput(i));
- value = graph()->NewNode(simplified()->NumberMax(), value, input);
- }
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.25 Math.min ( value1, value2, ...values )
-Reduction JSBuiltinReducer::ReduceMathMin(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchZero()) {
- // Math.min() -> Infinity
- return Replace(jsgraph()->Constant(V8_INFINITY));
- }
- if (r.InputsMatchAll(Type::PlainPrimitive())) {
- // Math.min(a:plain-primitive, b:plain-primitive, ...)
- Node* value = ToNumber(r.GetJSCallInput(0));
- for (int i = 1; i < r.GetJSCallArity(); i++) {
- Node* input = ToNumber(r.GetJSCallInput(i));
- value = graph()->NewNode(simplified()->NumberMin(), value, input);
- }
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.26 Math.pow ( x, y )
-Reduction JSBuiltinReducer::ReduceMathPow(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchTwo(Type::PlainPrimitive(), Type::PlainPrimitive())) {
- // Math.pow(a:plain-primitive,
- // b:plain-primitive) -> NumberPow(ToNumber(a), ToNumber(b))
- Node* left = ToNumber(r.left());
- Node* right = ToNumber(r.right());
- Node* value = graph()->NewNode(simplified()->NumberPow(), left, right);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.28 Math.round ( x )
-Reduction JSBuiltinReducer::ReduceMathRound(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.round(a:plain-primitive) -> NumberRound(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberRound(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.9 Math.cbrt ( x )
-Reduction JSBuiltinReducer::ReduceMathCbrt(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Number())) {
- // Math.cbrt(a:number) -> NumberCbrt(a)
- Node* value = graph()->NewNode(simplified()->NumberCbrt(), r.left());
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.29 Math.sign ( x )
-Reduction JSBuiltinReducer::ReduceMathSign(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.sign(a:plain-primitive) -> NumberSign(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberSign(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.30 Math.sin ( x )
-Reduction JSBuiltinReducer::ReduceMathSin(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.sin(a:plain-primitive) -> NumberSin(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberSin(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.31 Math.sinh ( x )
-Reduction JSBuiltinReducer::ReduceMathSinh(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.sinh(a:plain-primitive) -> NumberSinh(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberSinh(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.32 Math.sqrt ( x )
-Reduction JSBuiltinReducer::ReduceMathSqrt(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.sqrt(a:plain-primitive) -> NumberSqrt(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberSqrt(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.33 Math.tan ( x )
-Reduction JSBuiltinReducer::ReduceMathTan(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.tan(a:plain-primitive) -> NumberTan(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberTan(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.34 Math.tanh ( x )
-Reduction JSBuiltinReducer::ReduceMathTanh(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.tanh(a:plain-primitive) -> NumberTanh(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberTanh(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.35 Math.trunc ( x )
-Reduction JSBuiltinReducer::ReduceMathTrunc(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.trunc(a:plain-primitive) -> NumberTrunc(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberTrunc(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
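Every reducer deleted in this block shares one shape: verify the inputs are plain primitives, insert a ToNumber (or ToUint32) conversion, and emit the matching pure simplified operator (NumberAbs, NumberCeil, and so on). Their replacements appear further down in js-call-reducer.cc, where the conversion becomes a feedback-guarded SpeculativeToNumber instead of a static type check. For reference, a minimal sketch of ToNumber over exactly the plain-primitive inputs accepted here; the variant type is illustrative, not a V8 type, and string parsing is only approximated:

    #include <cmath>
    #include <cstdlib>
    #include <string>

    // Illustrative "plain primitive": undefined, null, boolean, number, string.
    struct PlainPrimitive {
      enum Kind { kUndefined, kNull, kBoolean, kNumber, kString } kind;
      bool boolean = false;
      double number = 0;
      std::string string;
    };

    double ToNumber(const PlainPrimitive& value) {
      switch (value.kind) {
        case PlainPrimitive::kUndefined: return std::nan("");
        case PlainPrimitive::kNull:      return 0;
        case PlainPrimitive::kBoolean:   return value.boolean ? 1 : 0;
        case PlainPrimitive::kNumber:    return value.number;
        case PlainPrimitive::kString:
          // Approximation only; the spec's StringToNumber differs in corner cases.
          return std::strtod(value.string.c_str(), nullptr);
      }
      return std::nan("");
    }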
// ES6 section 20.1.2.2 Number.isFinite ( number )
Reduction JSBuiltinReducer::ReduceNumberIsFinite(Node* node) {
JSCallReduction r(node);
@@ -1908,18 +1483,6 @@ Reduction JSBuiltinReducer::ReduceObjectCreate(Node* node) {
return Replace(value);
}
-// ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits )
-Reduction JSBuiltinReducer::ReduceStringFromCharCode(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // String.fromCharCode(a:plain-primitive) -> StringFromCharCode(a)
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->StringFromCharCode(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
namespace {
Node* GetStringWitness(Node* node) {
@@ -1964,13 +1527,13 @@ Reduction JSBuiltinReducer::ReduceStringConcat(Node* node) {
// builtin instead of the calling function.
Callable const callable =
CodeFactory::StringAdd(isolate(), flags, NOT_TENURED);
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNeedsFrameState,
Operator::kNoDeopt | Operator::kNoWrite);
node->ReplaceInput(0, jsgraph()->HeapConstant(callable.code()));
node->ReplaceInput(1, receiver);
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
return Changed(node);
}
}
@@ -1978,171 +1541,6 @@ Reduction JSBuiltinReducer::ReduceStringConcat(Node* node) {
return NoChange();
}
-Reduction JSBuiltinReducer::ReduceStringIterator(Node* node) {
- if (Node* receiver = GetStringWitness(node)) {
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- Node* map = jsgraph()->HeapConstant(
- handle(native_context()->string_iterator_map(), isolate()));
-
- // Allocate new iterator and attach the iterator to this string.
- AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(JSStringIterator::kSize, NOT_TENURED, Type::OtherObject());
- a.Store(AccessBuilder::ForMap(), map);
- a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
- jsgraph()->EmptyFixedArrayConstant());
- a.Store(AccessBuilder::ForJSObjectElements(),
- jsgraph()->EmptyFixedArrayConstant());
- a.Store(AccessBuilder::ForJSStringIteratorString(), receiver);
- a.Store(AccessBuilder::ForJSStringIteratorIndex(),
- jsgraph()->SmiConstant(0));
- Node* value = effect = a.Finish();
-
- // Replace it.
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- return NoChange();
-}
-
-Reduction JSBuiltinReducer::ReduceStringIteratorNext(Node* node) {
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- Node* context = NodeProperties::GetContextInput(node);
- if (HasInstanceTypeWitness(receiver, effect, JS_STRING_ITERATOR_TYPE)) {
- Node* string = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSStringIteratorString()),
- receiver, effect, control);
- Node* index = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSStringIteratorIndex()),
- receiver, effect, control);
- Node* length = graph()->NewNode(simplified()->StringLength(), string);
-
- // branch0: if (index < length)
- Node* check0 =
- graph()->NewNode(simplified()->NumberLessThan(), index, length);
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
-
- Node* etrue0 = effect;
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* done_true;
- Node* vtrue0;
- {
- done_true = jsgraph()->FalseConstant();
- Node* lead = graph()->NewNode(simplified()->StringCharCodeAt(), string,
- index, if_true0);
-
- // branch1: if ((lead & 0xFC00) === 0xD800)
- Node* check1 =
- graph()->NewNode(simplified()->NumberEqual(),
- graph()->NewNode(simplified()->NumberBitwiseAnd(),
- lead, jsgraph()->Constant(0xFC00)),
- jsgraph()->Constant(0xD800));
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check1, if_true0);
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* vtrue1;
- {
- Node* next_index = graph()->NewNode(simplified()->NumberAdd(), index,
- jsgraph()->OneConstant());
- // branch2: if ((index + 1) < length)
- Node* check2 = graph()->NewNode(simplified()->NumberLessThan(),
- next_index, length);
- Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check2, if_true1);
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* vtrue2;
- {
- Node* trail = graph()->NewNode(simplified()->StringCharCodeAt(),
- string, next_index, if_true2);
- // branch3: if ((trail & 0xFC00) === 0xDC00)
- Node* check3 = graph()->NewNode(
- simplified()->NumberEqual(),
- graph()->NewNode(simplified()->NumberBitwiseAnd(), trail,
- jsgraph()->Constant(0xFC00)),
- jsgraph()->Constant(0xDC00));
- Node* branch3 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check3, if_true2);
- Node* if_true3 = graph()->NewNode(common()->IfTrue(), branch3);
- Node* vtrue3;
- {
- vtrue3 = graph()->NewNode(
- simplified()->NumberBitwiseOr(),
-// Need to swap the order for big-endian platforms
-#if V8_TARGET_BIG_ENDIAN
- graph()->NewNode(simplified()->NumberShiftLeft(), lead,
- jsgraph()->Constant(16)),
- trail);
-#else
- graph()->NewNode(simplified()->NumberShiftLeft(), trail,
- jsgraph()->Constant(16)),
- lead);
-#endif
- }
-
- Node* if_false3 = graph()->NewNode(common()->IfFalse(), branch3);
- Node* vfalse3 = lead;
- if_true2 = graph()->NewNode(common()->Merge(2), if_true3, if_false3);
- vtrue2 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue3, vfalse3, if_true2);
- }
-
- Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
- Node* vfalse2 = lead;
- if_true1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
- vtrue1 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue2, vfalse2, if_true1);
- }
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* vfalse1 = lead;
- if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- vtrue0 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue1, vfalse1, if_true0);
- vtrue0 = graph()->NewNode(
- simplified()->StringFromCodePoint(UnicodeEncoding::UTF16), vtrue0);
-
- // Update iterator.[[NextIndex]]
- Node* char_length =
- graph()->NewNode(simplified()->StringLength(), vtrue0);
- index = graph()->NewNode(simplified()->NumberAdd(), index, char_length);
- etrue0 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSStringIteratorIndex()),
- receiver, index, etrue0, if_true0);
- }
-
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* done_false;
- Node* vfalse0;
- {
- vfalse0 = jsgraph()->UndefinedConstant();
- done_false = jsgraph()->TrueConstant();
- }
-
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue0, effect, control);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue0, vfalse0, control);
- Node* done =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- done_true, done_false, control);
-
- value = effect = graph()->NewNode(javascript()->CreateIterResultObject(),
- value, done, context, effect);
-
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- return NoChange();
-}
-
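The removed ReduceStringIteratorNext built, as graph nodes, the standard UTF-16 iteration step: read the code unit at the current index, and if it is a lead surrogate followed by a trail surrogate, return both units as one code point (the graph packs the two units into a single word for StringFromCodePoint with UTF16 encoding, swapping the packing order on big-endian targets). The same decision in ordinary C++, using the textbook UTF-16 rule rather than V8's packing:

    #include <cstdint>
    #include <string>

    // One iteration step over a UTF-16 string: returns the number of code units
    // consumed (1 or 2) and writes the decoded code point. Mirrors the checks in
    // the removed graph: (lead & 0xFC00) == 0xD800 and (trail & 0xFC00) == 0xDC00.
    int NextCodePoint(const std::u16string& s, size_t index, uint32_t* code_point) {
      uint16_t lead = s[index];
      if ((lead & 0xFC00) == 0xD800 && index + 1 < s.size()) {
        uint16_t trail = s[index + 1];
        if ((trail & 0xFC00) == 0xDC00) {
          *code_point = 0x10000 + ((lead - 0xD800) << 10) + (trail - 0xDC00);
          return 2;  // consumed a surrogate pair
        }
      }
      *code_point = lead;  // BMP character or unpaired surrogate
      return 1;
    }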
// ES section #sec-string.prototype.slice
Reduction JSBuiltinReducer::ReduceStringSlice(Node* node) {
if (Node* receiver = GetStringWitness(node)) {
@@ -2173,6 +1571,7 @@ Reduction JSBuiltinReducer::ReduceStringSlice(Node* node) {
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
Node* vfalse;
+ Node* efalse;
{
// We need to convince TurboFan that {receiver_length}-1 is a valid
// Unsigned32 value, so we just apply NumberToUint32 to the result
@@ -2181,14 +1580,16 @@ Reduction JSBuiltinReducer::ReduceStringSlice(Node* node) {
graph()->NewNode(simplified()->NumberSubtract(), receiver_length,
jsgraph()->OneConstant());
index = graph()->NewNode(simplified()->NumberToUint32(), index);
- vfalse = graph()->NewNode(simplified()->StringCharAt(), receiver, index,
- if_false);
+ vfalse = efalse = graph()->NewNode(simplified()->StringCharAt(),
+ receiver, index, effect, if_false);
}
control = graph()->NewNode(common()->Merge(2), if_true, if_false);
Node* value =
graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
vtrue, vfalse, control);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), effect, efalse, control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
@@ -2196,30 +1597,6 @@ Reduction JSBuiltinReducer::ReduceStringSlice(Node* node) {
return NoChange();
}
-Reduction JSBuiltinReducer::ReduceStringToLowerCaseIntl(Node* node) {
- if (Node* receiver = GetStringWitness(node)) {
- RelaxEffectsAndControls(node);
- node->ReplaceInput(0, receiver);
- node->TrimInputCount(1);
- NodeProperties::ChangeOp(node, simplified()->StringToLowerCaseIntl());
- NodeProperties::SetType(node, Type::String());
- return Changed(node);
- }
- return NoChange();
-}
-
-Reduction JSBuiltinReducer::ReduceStringToUpperCaseIntl(Node* node) {
- if (Node* receiver = GetStringWitness(node)) {
- RelaxEffectsAndControls(node);
- node->ReplaceInput(0, receiver);
- node->TrimInputCount(1);
- NodeProperties::ChangeOp(node, simplified()->StringToUpperCaseIntl());
- NodeProperties::SetType(node, Type::String());
- return Changed(node);
- }
- return NoChange();
-}
-
Reduction JSBuiltinReducer::ReduceArrayBufferIsView(Node* node) {
Node* value = node->op()->ValueInputCount() >= 3
? NodeProperties::GetValueInput(node, 2)
@@ -2236,7 +1613,7 @@ Reduction JSBuiltinReducer::ReduceArrayBufferViewAccessor(
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (HasInstanceTypeWitness(receiver, effect, instance_type)) {
+ if (NodeProperties::HasInstanceTypeWitness(receiver, effect, instance_type)) {
// Load the {receiver}s field.
Node* value = effect = graph()->NewNode(simplified()->LoadField(access),
receiver, effect, control);
@@ -2274,6 +1651,7 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
// Dispatch according to the BuiltinFunctionId if present.
if (!r.HasBuiltinFunctionId()) return NoChange();
+ if (!r.BuiltinCanBeInlined()) return NoChange();
switch (r.GetBuiltinFunctionId()) {
case kArrayEntries:
return ReduceArrayIterator(node, IterationKind::kEntries);
@@ -2316,105 +1694,6 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
return ReduceCollectionIteratorNext(
node, OrderedHashMap::kEntrySize, factory()->empty_ordered_hash_map(),
FIRST_MAP_ITERATOR_TYPE, LAST_MAP_ITERATOR_TYPE);
- case kMathAbs:
- reduction = ReduceMathAbs(node);
- break;
- case kMathAcos:
- reduction = ReduceMathAcos(node);
- break;
- case kMathAcosh:
- reduction = ReduceMathAcosh(node);
- break;
- case kMathAsin:
- reduction = ReduceMathAsin(node);
- break;
- case kMathAsinh:
- reduction = ReduceMathAsinh(node);
- break;
- case kMathAtan:
- reduction = ReduceMathAtan(node);
- break;
- case kMathAtanh:
- reduction = ReduceMathAtanh(node);
- break;
- case kMathAtan2:
- reduction = ReduceMathAtan2(node);
- break;
- case kMathCbrt:
- reduction = ReduceMathCbrt(node);
- break;
- case kMathCeil:
- reduction = ReduceMathCeil(node);
- break;
- case kMathClz32:
- reduction = ReduceMathClz32(node);
- break;
- case kMathCos:
- reduction = ReduceMathCos(node);
- break;
- case kMathCosh:
- reduction = ReduceMathCosh(node);
- break;
- case kMathExp:
- reduction = ReduceMathExp(node);
- break;
- case kMathExpm1:
- reduction = ReduceMathExpm1(node);
- break;
- case kMathFloor:
- reduction = ReduceMathFloor(node);
- break;
- case kMathFround:
- reduction = ReduceMathFround(node);
- break;
- case kMathImul:
- reduction = ReduceMathImul(node);
- break;
- case kMathLog:
- reduction = ReduceMathLog(node);
- break;
- case kMathLog1p:
- reduction = ReduceMathLog1p(node);
- break;
- case kMathLog10:
- reduction = ReduceMathLog10(node);
- break;
- case kMathLog2:
- reduction = ReduceMathLog2(node);
- break;
- case kMathMax:
- reduction = ReduceMathMax(node);
- break;
- case kMathMin:
- reduction = ReduceMathMin(node);
- break;
- case kMathPow:
- reduction = ReduceMathPow(node);
- break;
- case kMathRound:
- reduction = ReduceMathRound(node);
- break;
- case kMathSign:
- reduction = ReduceMathSign(node);
- break;
- case kMathSin:
- reduction = ReduceMathSin(node);
- break;
- case kMathSinh:
- reduction = ReduceMathSinh(node);
- break;
- case kMathSqrt:
- reduction = ReduceMathSqrt(node);
- break;
- case kMathTan:
- reduction = ReduceMathTan(node);
- break;
- case kMathTanh:
- reduction = ReduceMathTanh(node);
- break;
- case kMathTrunc:
- reduction = ReduceMathTrunc(node);
- break;
case kNumberIsFinite:
reduction = ReduceNumberIsFinite(node);
break;
@@ -2445,21 +1724,10 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
return ReduceCollectionIteratorNext(
node, OrderedHashSet::kEntrySize, factory()->empty_ordered_hash_set(),
FIRST_SET_ITERATOR_TYPE, LAST_SET_ITERATOR_TYPE);
- case kStringFromCharCode:
- reduction = ReduceStringFromCharCode(node);
- break;
case kStringConcat:
return ReduceStringConcat(node);
- case kStringIterator:
- return ReduceStringIterator(node);
- case kStringIteratorNext:
- return ReduceStringIteratorNext(node);
case kStringSlice:
return ReduceStringSlice(node);
- case kStringToLowerCaseIntl:
- return ReduceStringToLowerCaseIntl(node);
- case kStringToUpperCaseIntl:
- return ReduceStringToUpperCaseIntl(node);
case kArrayBufferIsView:
return ReduceArrayBufferIsView(node);
case kDataViewByteLength:
diff --git a/deps/v8/src/compiler/js-builtin-reducer.h b/deps/v8/src/compiler/js-builtin-reducer.h
index b3c44c7a0f..d24bcc9746 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.h
+++ b/deps/v8/src/compiler/js-builtin-reducer.h
@@ -69,52 +69,14 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final
Reduction ReduceGlobalIsNaN(Node* node);
Reduction ReduceMapHas(Node* node);
Reduction ReduceMapGet(Node* node);
- Reduction ReduceMathAbs(Node* node);
- Reduction ReduceMathAcos(Node* node);
- Reduction ReduceMathAcosh(Node* node);
- Reduction ReduceMathAsin(Node* node);
- Reduction ReduceMathAsinh(Node* node);
- Reduction ReduceMathAtan(Node* node);
- Reduction ReduceMathAtanh(Node* node);
- Reduction ReduceMathAtan2(Node* node);
- Reduction ReduceMathCbrt(Node* node);
- Reduction ReduceMathCeil(Node* node);
- Reduction ReduceMathClz32(Node* node);
- Reduction ReduceMathCos(Node* node);
- Reduction ReduceMathCosh(Node* node);
- Reduction ReduceMathExp(Node* node);
- Reduction ReduceMathExpm1(Node* node);
- Reduction ReduceMathFloor(Node* node);
- Reduction ReduceMathFround(Node* node);
- Reduction ReduceMathImul(Node* node);
- Reduction ReduceMathLog(Node* node);
- Reduction ReduceMathLog1p(Node* node);
- Reduction ReduceMathLog10(Node* node);
- Reduction ReduceMathLog2(Node* node);
- Reduction ReduceMathMax(Node* node);
- Reduction ReduceMathMin(Node* node);
- Reduction ReduceMathPow(Node* node);
- Reduction ReduceMathRound(Node* node);
- Reduction ReduceMathSign(Node* node);
- Reduction ReduceMathSin(Node* node);
- Reduction ReduceMathSinh(Node* node);
- Reduction ReduceMathSqrt(Node* node);
- Reduction ReduceMathTan(Node* node);
- Reduction ReduceMathTanh(Node* node);
- Reduction ReduceMathTrunc(Node* node);
Reduction ReduceNumberIsFinite(Node* node);
Reduction ReduceNumberIsInteger(Node* node);
Reduction ReduceNumberIsNaN(Node* node);
Reduction ReduceNumberIsSafeInteger(Node* node);
Reduction ReduceNumberParseInt(Node* node);
Reduction ReduceObjectCreate(Node* node);
- Reduction ReduceStringConcat(Node* node);
- Reduction ReduceStringFromCharCode(Node* node);
- Reduction ReduceStringIterator(Node* node);
- Reduction ReduceStringIteratorNext(Node* node);
Reduction ReduceStringSlice(Node* node);
- Reduction ReduceStringToLowerCaseIntl(Node* node);
- Reduction ReduceStringToUpperCaseIntl(Node* node);
+ Reduction ReduceStringConcat(Node* node);
Reduction ReduceArrayBufferIsView(Node* node);
Reduction ReduceArrayBufferViewAccessor(Node* node,
InstanceType instance_type,
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 1f8e7a2cef..12fb14c6fc 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -5,6 +5,7 @@
#include "src/compiler/js-call-reducer.h"
#include "src/api.h"
+#include "src/builtins/builtins-promise-gen.h"
#include "src/builtins/builtins-utils.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
@@ -24,6 +25,145 @@ namespace v8 {
namespace internal {
namespace compiler {
+Reduction JSCallReducer::ReduceMathUnary(Node* node, const Operator* op) {
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+ if (node->op()->ValueInputCount() < 3) {
+ Node* value = jsgraph()->NaNConstant();
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* input = NodeProperties::GetValueInput(node, 2);
+
+ input = effect =
+ graph()->NewNode(simplified()->SpeculativeToNumber(
+ NumberOperationHint::kNumberOrOddball, p.feedback()),
+ input, effect, control);
+ Node* value = graph()->NewNode(op, input);
+ ReplaceWithValue(node, value, effect);
+ return Replace(value);
+}
+
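The new ReduceMathUnary covers the whole family of one-argument Math builtins with a single helper: bail out when speculation is disallowed, fold the call to NaN when there is no value argument (value inputs are target, receiver, then arguments, hence the ValueInputCount() < 3 check), and otherwise route the argument through SpeculativeToNumber before the pure operator. The NaN fold is just the spec behavior for a missing argument, as this tiny sketch shows:

    #include <cassert>
    #include <cmath>

    // Math.sin() is Math.sin(undefined): ToNumber(undefined) is NaN, and every
    // Math unary returns NaN for a NaN input, so the reducer can replace the
    // no-argument call with the NaN constant directly.
    int main() {
      double arg = std::nan("");          // ToNumber(undefined)
      assert(std::isnan(std::sin(arg)));  // NumberSin(NaN) == NaN
      return 0;
    }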
+Reduction JSCallReducer::ReduceMathBinary(Node* node, const Operator* op) {
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+ if (node->op()->ValueInputCount() < 4) {
+ Node* value = jsgraph()->NaNConstant();
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ Node* left = NodeProperties::GetValueInput(node, 2);
+ Node* right = NodeProperties::GetValueInput(node, 3);
+ left = effect =
+ graph()->NewNode(simplified()->SpeculativeToNumber(
+ NumberOperationHint::kNumberOrOddball, p.feedback()),
+ left, effect, control);
+ right = effect =
+ graph()->NewNode(simplified()->SpeculativeToNumber(
+ NumberOperationHint::kNumberOrOddball, p.feedback()),
+ right, effect, control);
+ Node* value = graph()->NewNode(op, left, right);
+ ReplaceWithValue(node, value, effect);
+ return Replace(value);
+}
+
+// ES6 section 20.2.2.19 Math.imul ( x, y )
+Reduction JSCallReducer::ReduceMathImul(Node* node) {
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+ if (node->op()->ValueInputCount() < 4) {
+ Node* value = jsgraph()->ZeroConstant();
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ Node* left = NodeProperties::GetValueInput(node, 2);
+ Node* right = NodeProperties::GetValueInput(node, 3);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ left = effect =
+ graph()->NewNode(simplified()->SpeculativeToNumber(
+ NumberOperationHint::kNumberOrOddball, p.feedback()),
+ left, effect, control);
+ right = effect =
+ graph()->NewNode(simplified()->SpeculativeToNumber(
+ NumberOperationHint::kNumberOrOddball, p.feedback()),
+ right, effect, control);
+ left = graph()->NewNode(simplified()->NumberToUint32(), left);
+ right = graph()->NewNode(simplified()->NumberToUint32(), right);
+ Node* value = graph()->NewNode(simplified()->NumberImul(), left, right);
+ ReplaceWithValue(node, value, effect);
+ return Replace(value);
+}
+
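Math.imul is special-cased rather than routed through ReduceMathBinary because both operands go through NumberToUint32 and the multiply is a 32-bit operation whose result is read back as a signed int32; with fewer than two arguments the missing operand converts to 0, so the product is always 0, matching the ZeroConstant fold above. A standalone sketch of the arithmetic, with ToUint32 written out per the spec (not V8 code):

    #include <cmath>
    #include <cstdint>

    // ToUint32: NaN and +/-Infinity map to 0, everything else is truncated and
    // wrapped modulo 2^32.
    uint32_t ToUint32(double x) {
      if (std::isnan(x) || std::isinf(x)) return 0;
      double m = std::fmod(std::trunc(x), 4294967296.0);
      if (m < 0) m += 4294967296.0;
      return static_cast<uint32_t>(m);
    }

    // Math.imul(x, y): unsigned 32-bit multiply, result read back as signed int32.
    int32_t MathImul(double x, double y) {
      return static_cast<int32_t>(ToUint32(x) * ToUint32(y));
    }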
+// ES6 section 20.2.2.11 Math.clz32 ( x )
+Reduction JSCallReducer::ReduceMathClz32(Node* node) {
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+ if (node->op()->ValueInputCount() < 3) {
+ Node* value = jsgraph()->Constant(32);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ Node* input = NodeProperties::GetValueInput(node, 2);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ input = effect =
+ graph()->NewNode(simplified()->SpeculativeToNumber(
+ NumberOperationHint::kNumberOrOddball, p.feedback()),
+ input, effect, control);
+ input = graph()->NewNode(simplified()->NumberToUint32(), input);
+ Node* value = graph()->NewNode(simplified()->NumberClz32(), input);
+ ReplaceWithValue(node, value, effect);
+ return Replace(value);
+}
+
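Math.clz32 likewise converts through NumberToUint32 first and then counts leading zero bits. With no argument, ToUint32(undefined) is 0 and the count is 32, which is the constant the no-argument fold installs above. A plain C++ sketch of the counting loop:

    #include <cstdint>

    // Math.clz32(x): leading zero bits of ToUint32(x); 32 when the value is 0.
    int MathClz32(uint32_t value) {
      int n = 0;
      while (n < 32 && (value & 0x80000000u) == 0) {
        value <<= 1;
        ++n;
      }
      return n;
    }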
+// ES6 section 20.2.2.24 Math.max ( value1, value2, ...values )
+// ES6 section 20.2.2.25 Math.min ( value1, value2, ...values )
+Reduction JSCallReducer::ReduceMathMinMax(Node* node, const Operator* op,
+ Node* empty_value) {
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+ if (node->op()->ValueInputCount() <= 2) {
+ ReplaceWithValue(node, empty_value);
+ return Replace(empty_value);
+ }
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ Node* value = effect =
+ graph()->NewNode(simplified()->SpeculativeToNumber(
+ NumberOperationHint::kNumberOrOddball, p.feedback()),
+ NodeProperties::GetValueInput(node, 2), effect, control);
+ for (int i = 3; i < node->op()->ValueInputCount(); i++) {
+ Node* input = effect = graph()->NewNode(
+ simplified()->SpeculativeToNumber(NumberOperationHint::kNumberOrOddball,
+ p.feedback()),
+ NodeProperties::GetValueInput(node, i), effect, control);
+ value = graph()->NewNode(op, value, input);
+ }
+
+ ReplaceWithValue(node, value, effect);
+ return Replace(value);
+}
+
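ReduceMathMinMax folds the arguments pairwise, one SpeculativeToNumber-converted input at a time, into a chain of NumberMax or NumberMin nodes; empty_value (-Infinity for Math.max, +Infinity for Math.min) is used only when the call has no arguments at all. An equivalent fold in plain C++, seeded with -Infinity since that is Math.max's identity element as well as its zero-argument result; NaN propagates, and the +/-0 ordering Math.max/min also define is omitted for brevity:

    #include <cmath>
    #include <limits>
    #include <vector>

    // Fold equivalent to the NumberMax chain built above.
    double FoldMax(const std::vector<double>& args) {
      double value = -std::numeric_limits<double>::infinity();
      for (double arg : args) {
        if (std::isnan(arg) || std::isnan(value)) {
          value = std::numeric_limits<double>::quiet_NaN();
        } else if (arg > value) {
          value = arg;
        }
      }
      return value;
    }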
Reduction JSCallReducer::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kJSConstruct:
@@ -112,7 +252,7 @@ Reduction JSCallReducer::ReduceObjectConstructor(Node* node) {
}
} else {
ReplaceWithValue(node, value);
- return Replace(node);
+ return Replace(value);
}
return NoChange();
}
@@ -338,12 +478,25 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
- Handle<JSFunction> call = Handle<JSFunction>::cast(
- HeapObjectMatcher(NodeProperties::GetValueInput(node, 0)).Value());
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
// Change context of {node} to the Function.prototype.call context,
// to ensure any exception is thrown in the correct context.
- NodeProperties::ReplaceContextInput(
- node, jsgraph()->HeapConstant(handle(call->context(), isolate())));
+ Node* context;
+ HeapObjectMatcher m(target);
+ if (m.HasValue()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
+ context = jsgraph()->HeapConstant(handle(function->context(), isolate()));
+ } else {
+ context = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSFunctionContext()), target,
+ effect, control);
+ }
+ NodeProperties::ReplaceContextInput(node, context);
+ NodeProperties::ReplaceEffectInput(node, effect);
+
// Remove the target from {node} and use the receiver as target instead, and
// the thisArg becomes the new target. If thisArg was not provided, insert
// undefined instead.
@@ -672,14 +825,14 @@ Reduction JSCallReducer::ReduceReflectGet(Node* node) {
Node* vtrue;
{
Callable callable = CodeFactory::GetProperty(isolate());
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNeedsFrameState, Operator::kNoProperties,
MachineType::AnyTagged(), 1);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
vtrue = etrue = if_true =
- graph()->NewNode(common()->Call(desc), stub_code, target, key, context,
- frame_state, etrue, if_true);
+ graph()->NewNode(common()->Call(call_descriptor), stub_code, target,
+ key, context, frame_state, etrue, if_true);
}
// Rewire potential exception edges.
@@ -810,8 +963,8 @@ void JSCallReducer::WireInLoopEnd(Node* loop, Node* eloop, Node* vloop, Node* k,
eloop->ReplaceInput(1, effect);
}
-Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
- Node* node) {
+Reduction JSCallReducer::ReduceArrayForEach(Node* node,
+ Handle<SharedFunctionInfo> shared) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
@@ -884,7 +1037,7 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
// Check whether the given callback function is callable. Note that this has
// to happen outside the loop to make sure we also throw on empty arrays.
Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayForEachLoopLazyDeoptContinuation,
+ jsgraph(), shared, Builtins::kArrayForEachLoopLazyDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::LAZY);
Node* check_fail = nullptr;
@@ -907,7 +1060,7 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
control = if_true;
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayForEachLoopEagerDeoptContinuation,
+ jsgraph(), shared, Builtins::kArrayForEachLoopEagerDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::EAGER);
@@ -955,7 +1108,7 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
}
frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayForEachLoopLazyDeoptContinuation,
+ jsgraph(), shared, Builtins::kArrayForEachLoopLazyDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::LAZY);
@@ -998,254 +1151,21 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
return Replace(jsgraph()->UndefinedConstant());
}
-Reduction JSCallReducer::ReduceArrayReduce(Handle<JSFunction> function,
- Node* node) {
+Reduction JSCallReducer::ReduceArrayReduce(Node* node,
+ ArrayReduceDirection direction,
+ Handle<SharedFunctionInfo> shared) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
-
- Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- Node* context = NodeProperties::GetContextInput(node);
CallParameters const& p = CallParametersOf(node->op());
if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
return NoChange();
}
-
- // Try to determine the {receiver} map.
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* fncallback = node->op()->ValueInputCount() > 2
- ? NodeProperties::GetValueInput(node, 2)
- : jsgraph()->UndefinedConstant();
-
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
- if (result == NodeProperties::kNoReceiverMaps) return NoChange();
-
- ElementsKind kind = IsDoubleElementsKind(receiver_maps[0]->elements_kind())
- ? PACKED_DOUBLE_ELEMENTS
- : PACKED_ELEMENTS;
- for (Handle<Map> receiver_map : receiver_maps) {
- ElementsKind next_kind = receiver_map->elements_kind();
- if (!CanInlineArrayIteratingBuiltin(receiver_map)) {
- return NoChange();
- }
- if (!IsFastElementsKind(next_kind) || IsHoleyElementsKind(next_kind)) {
- return NoChange();
- }
- if (IsDoubleElementsKind(kind) != IsDoubleElementsKind(next_kind)) {
- return NoChange();
- }
- if (IsHoleyElementsKind(next_kind)) {
- kind = HOLEY_ELEMENTS;
- }
- }
-
- // Install code dependencies on the {receiver} prototype maps and the
- // global array protector cell.
- dependencies()->AssumePropertyCell(factory()->no_elements_protector());
-
- // If we have unreliable maps, we need a map check.
- if (result == NodeProperties::kUnreliableReceiverMaps) {
- effect =
- graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
- receiver_maps, p.feedback()),
- receiver, effect, control);
- }
-
- Node* original_length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS)),
- receiver, effect, control);
-
- Node* k = jsgraph()->ZeroConstant();
-
- std::vector<Node*> checkpoint_params({receiver, fncallback, k,
- original_length,
- jsgraph()->UndefinedConstant()});
- const int stack_parameters = static_cast<int>(checkpoint_params.size());
-
- // Check whether the given callback function is callable. Note that this has
- // to happen outside the loop to make sure we also throw on empty arrays.
- Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayReduceLoopLazyDeoptContinuation,
- node->InputAt(0), context, &checkpoint_params[0], stack_parameters - 1,
- outer_frame_state, ContinuationFrameStateMode::LAZY);
- Node* check_fail = nullptr;
- Node* check_throw = nullptr;
- WireInCallbackIsCallableCheck(fncallback, context, check_frame_state, effect,
- &control, &check_fail, &check_throw);
-
- // Set initial accumulator value
- Node* cur = jsgraph()->TheHoleConstant();
-
- Node* initial_element_check_fail = nullptr;
- Node* initial_element_check_throw = nullptr;
- if (node->op()->ValueInputCount() > 3) {
- cur = NodeProperties::GetValueInput(node, 3);
- } else {
- Node* check =
- graph()->NewNode(simplified()->NumberEqual(), original_length, k);
- Node* check_branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
- initial_element_check_fail =
- graph()->NewNode(common()->IfTrue(), check_branch);
- initial_element_check_throw = graph()->NewNode(
- javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
- jsgraph()->Constant(MessageTemplate::kReduceNoInitial), fncallback,
- context, check_frame_state, effect, initial_element_check_fail);
- control = graph()->NewNode(common()->IfFalse(), check_branch);
-
- cur = SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
- k = graph()->NewNode(simplified()->NumberAdd(), k,
- jsgraph()->OneConstant());
- }
-
- // Start the loop.
- Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
- Node* eloop = effect =
- graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
- Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
- NodeProperties::MergeControlToEnd(graph(), common(), terminate);
- Node* kloop = k = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2), k, k, loop);
- Node* curloop = cur = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2), cur, cur, loop);
- checkpoint_params[2] = k;
- checkpoint_params[4] = curloop;
-
- control = loop;
- effect = eloop;
-
- Node* continue_test =
- graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
- Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- continue_test, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), continue_branch);
- Node* if_false = graph()->NewNode(common()->IfFalse(), continue_branch);
- control = if_true;
-
- Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayReduceLoopEagerDeoptContinuation,
- node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
- outer_frame_state, ContinuationFrameStateMode::EAGER);
-
- effect =
- graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
-
- // Make sure the map hasn't changed during the iteration
- effect = graph()->NewNode(
- simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
- effect, control);
-
- Node* element =
- SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
-
- Node* next_k =
- graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
- checkpoint_params[2] = next_k;
-
- Node* hole_true = nullptr;
- Node* hole_false = nullptr;
- Node* effect_true = effect;
-
- if (IsHoleyElementsKind(kind)) {
- // Holey elements kind require a hole check and skipping of the element in
- // the case of a hole.
- Node* check = graph()->NewNode(simplified()->ReferenceEqual(), element,
- jsgraph()->TheHoleConstant());
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
- hole_true = graph()->NewNode(common()->IfTrue(), branch);
- hole_false = graph()->NewNode(common()->IfFalse(), branch);
- control = hole_false;
-
- // The contract is that we don't leak "the hole" into "user JavaScript",
- // so we must rename the {element} here to explicitly exclude "the hole"
- // from the type of {element}.
- element = effect = graph()->NewNode(
- common()->TypeGuard(Type::NonInternal()), element, effect, control);
- }
-
- frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayReduceLoopLazyDeoptContinuation,
- node->InputAt(0), context, &checkpoint_params[0], stack_parameters - 1,
- outer_frame_state, ContinuationFrameStateMode::LAZY);
-
- Node* next_cur = control = effect =
- graph()->NewNode(javascript()->Call(6, p.frequency()), fncallback,
- jsgraph()->UndefinedConstant(), cur, element, k,
- receiver, context, frame_state, effect, control);
-
- // Rewire potential exception edges.
- Node* on_exception = nullptr;
- if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
- RewirePostCallbackExceptionEdges(check_throw, on_exception, effect,
- &check_fail, &control);
- }
-
- if (IsHoleyElementsKind(kind)) {
- Node* after_call_control = control;
- Node* after_call_effect = effect;
- control = hole_true;
- effect = effect_true;
-
- control = graph()->NewNode(common()->Merge(2), control, after_call_control);
- effect = graph()->NewNode(common()->EffectPhi(2), effect, after_call_effect,
- control);
- next_cur =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), cur,
- next_cur, control);
- }
-
- k = next_k;
- cur = next_cur;
-
- loop->ReplaceInput(1, control);
- kloop->ReplaceInput(1, k);
- curloop->ReplaceInput(1, cur);
- eloop->ReplaceInput(1, effect);
-
- control = if_false;
- effect = eloop;
-
- // Wire up the branch for the case when IsCallable fails for the callback.
- // Since {check_throw} is an unconditional throw, it's impossible to
- // return a successful completion. Therefore, we simply connect the successful
- // completion to the graph end.
- Node* throw_node =
- graph()->NewNode(common()->Throw(), check_throw, check_fail);
- NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
-
- if (node->op()->ValueInputCount() <= 3) {
- // Wire up the branch for the case when an array is empty.
- // Since {check_throw} is an unconditional throw, it's impossible to
- // return a successful completion. Therefore, we simply connect the
- // successful completion to the graph end.
- Node* throw_node =
- graph()->NewNode(common()->Throw(), initial_element_check_throw,
- initial_element_check_fail);
- NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
- }
-
- ReplaceWithValue(node, curloop, effect, control);
- return Replace(curloop);
-} // namespace compiler
-
-Reduction JSCallReducer::ReduceArrayReduceRight(Handle<JSFunction> function,
- Node* node) {
- if (!FLAG_turbo_inline_array_builtins) return NoChange();
- DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ bool left = direction == ArrayReduceDirection::kLeft;
Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
- CallParameters const& p = CallParametersOf(node->op());
- if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
- return NoChange();
- }
// Try to determine the {receiver} map.
Node* receiver = NodeProperties::GetValueInput(node, 1);
@@ -1258,25 +1178,22 @@ Reduction JSCallReducer::ReduceArrayReduceRight(Handle<JSFunction> function,
NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
- ElementsKind kind = IsDoubleElementsKind(receiver_maps[0]->elements_kind())
- ? PACKED_DOUBLE_ELEMENTS
- : PACKED_ELEMENTS;
+ ElementsKind kind = receiver_maps[0]->elements_kind();
for (Handle<Map> receiver_map : receiver_maps) {
- ElementsKind next_kind = receiver_map->elements_kind();
- if (!CanInlineArrayIteratingBuiltin(receiver_map)) {
- return NoChange();
- }
- if (!IsFastElementsKind(next_kind) || IsHoleyElementsKind(next_kind)) {
- return NoChange();
- }
- if (IsDoubleElementsKind(kind) != IsDoubleElementsKind(next_kind)) {
+ if (!CanInlineArrayIteratingBuiltin(receiver_map)) return NoChange();
+ if (!UnionElementsKindUptoSize(&kind, receiver_map->elements_kind()))
return NoChange();
- }
- if (IsHoleyElementsKind(next_kind)) {
- kind = HOLEY_ELEMENTS;
- }
}
+ std::function<Node*(Node*)> hole_check = [this, kind](Node* element) {
+ if (IsDoubleElementsKind(kind)) {
+ return graph()->NewNode(simplified()->NumberIsFloat64Hole(), element);
+ } else {
+ return graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant());
+ }
+ };
+
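The hole_check lambda picks the right "is this element a hole?" test for the unified reduce lowering: holey double arrays mark holes with one reserved NaN bit pattern (hence NumberIsFloat64Hole), while tagged arrays store the one-of-a-kind hole object (hence ReferenceEqual against TheHoleConstant). A rough model of the double case, with a made-up bit pattern since the actual constant is not part of this diff:

    #include <cstdint>
    #include <cstring>

    // Rough model of NumberIsFloat64Hole: the hole is one specific NaN bit
    // pattern, so the test is a bit-for-bit compare rather than a floating-point
    // ==. kHoleBits is a stand-in value, not V8's constant.
    constexpr uint64_t kHoleBits = 0x7FF7DEADDEADBEEFull;  // hypothetical sentinel

    bool IsFloat64Hole(double element) {
      uint64_t bits;
      std::memcpy(&bits, &element, sizeof bits);
      return bits == kHoleBits;
    }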
// Install code dependencies on the {receiver} prototype maps and the
// global array protector cell.
dependencies()->AssumePropertyCell(factory()->no_elements_protector());
@@ -1293,48 +1210,84 @@ Reduction JSCallReducer::ReduceArrayReduceRight(Handle<JSFunction> function,
simplified()->LoadField(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS)),
receiver, effect, control);
- Node* k = graph()->NewNode(simplified()->NumberSubtract(), original_length,
- jsgraph()->OneConstant());
-
- std::vector<Node*> checkpoint_params({receiver, fncallback, k,
- original_length,
- jsgraph()->UndefinedConstant()});
- const int stack_parameters = static_cast<int>(checkpoint_params.size());
+ Node* initial_index =
+ left ? jsgraph()->ZeroConstant()
+ : graph()->NewNode(simplified()->NumberSubtract(), original_length,
+ jsgraph()->OneConstant());
+ const Operator* next_op =
+ left ? simplified()->NumberAdd() : simplified()->NumberSubtract();
+ Node* k = initial_index;
- // Check whether the given callback function is callable. Note that this has
- // to happen outside the loop to make sure we also throw on empty arrays.
- Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayReduceRightLoopLazyDeoptContinuation,
- node->InputAt(0), context, &checkpoint_params[0], stack_parameters - 1,
- outer_frame_state, ContinuationFrameStateMode::LAZY);
+ Node* check_frame_state;
+ {
+ Builtins::Name builtin_lazy =
+ left ? Builtins::kArrayReduceLoopLazyDeoptContinuation
+ : Builtins::kArrayReduceRightLoopLazyDeoptContinuation;
+ const std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, k, original_length,
+ jsgraph()->UndefinedConstant()});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+ check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), shared, builtin_lazy, node->InputAt(0), context,
+ checkpoint_params.data(), stack_parameters - 1, outer_frame_state,
+ ContinuationFrameStateMode::LAZY);
+ }
Node* check_fail = nullptr;
Node* check_throw = nullptr;
+ // Check whether the given callback function is callable. Note that
+ // this has to happen outside the loop to make sure we also throw on
+ // empty arrays.
WireInCallbackIsCallableCheck(fncallback, context, check_frame_state, effect,
&control, &check_fail, &check_throw);
// Set initial accumulator value
- Node* cur = nullptr;
+ Node* cur = jsgraph()->TheHoleConstant();
- Node* initial_element_check_fail = nullptr;
- Node* initial_element_check_throw = nullptr;
if (node->op()->ValueInputCount() > 3) {
cur = NodeProperties::GetValueInput(node, 3);
} else {
- Node* check = graph()->NewNode(simplified()->NumberEqual(), original_length,
- jsgraph()->SmiConstant(0));
- Node* check_branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
- initial_element_check_fail =
- graph()->NewNode(common()->IfTrue(), check_branch);
- initial_element_check_throw = graph()->NewNode(
- javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
- jsgraph()->Constant(MessageTemplate::kReduceNoInitial), fncallback,
- context, check_frame_state, effect, initial_element_check_fail);
- control = graph()->NewNode(common()->IfFalse(), check_branch);
+ // Find first/last non holey element. In case the search fails, we need a
+ // deopt continuation.
+ Builtins::Name builtin_eager =
+ left ? Builtins::kArrayReducePreLoopEagerDeoptContinuation
+ : Builtins::kArrayReduceRightPreLoopEagerDeoptContinuation;
+ const std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+ Node* find_first_element_frame_state =
+ CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), shared, builtin_eager, node->InputAt(0), context,
+ checkpoint_params.data(), stack_parameters, outer_frame_state,
+ ContinuationFrameStateMode::EAGER);
+
+ Node* vloop = k = WireInLoopStart(k, &control, &effect);
+ Node* loop = control;
+ Node* eloop = effect;
+ effect = graph()->NewNode(common()->Checkpoint(),
+ find_first_element_frame_state, effect, control);
+ Node* continue_test =
+ left ? graph()->NewNode(simplified()->NumberLessThan(), k,
+ original_length)
+ : graph()->NewNode(simplified()->NumberLessThanOrEqual(),
+ jsgraph()->ZeroConstant(), k);
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kNoInitialElement),
+ continue_test, effect, control);
cur = SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
- k = graph()->NewNode(simplified()->NumberSubtract(), k,
- jsgraph()->OneConstant());
+ Node* next_k = graph()->NewNode(next_op, k, jsgraph()->OneConstant());
+
+ Node* hole_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ hole_check(cur), control);
+ Node* found_el = graph()->NewNode(common()->IfFalse(), hole_branch);
+ control = found_el;
+ Node* is_hole = graph()->NewNode(common()->IfTrue(), hole_branch);
+
+ WireInLoopEnd(loop, eloop, vloop, next_k, is_hole, effect);
+ // We did the hole-check, so exclude hole from the type.
+ cur = effect = graph()->NewNode(common()->TypeGuard(Type::NonInternal()),
+ cur, effect, control);
+ k = next_k;
}
// Start the loop.
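When no initial accumulator is supplied, the rewritten lowering above searches from the front (reduce) or the back (reduceRight) for the first non-hole element and deoptimizes with kNoInitialElement if it runs off the end, which is exactly the case where the spec requires a TypeError for an empty array. The observable behavior, as a dense-array sketch that throws where the compiled code would deopt and let the generic builtin throw:

    #include <functional>
    #include <optional>
    #include <stdexcept>
    #include <vector>

    // Dense-array sketch of Array.prototype.reduce (left) / reduceRight:
    // without an initial value, the first element reached seeds the
    // accumulator, and an empty array is an error.
    double Reduce(const std::vector<double>& elements, bool left,
                  const std::function<double(double, double)>& callback,
                  std::optional<double> initial = std::nullopt) {
      const int length = static_cast<int>(elements.size());
      const int step = left ? 1 : -1;
      int k = left ? 0 : length - 1;
      double acc;
      if (initial.has_value()) {
        acc = *initial;
      } else {
        if (length == 0)
          throw std::runtime_error("Reduce of empty array with no initial value");
        acc = elements[k];  // seed from the first (or last) element
        k += step;
      }
      for (; k >= 0 && k < length; k += step) {
        acc = callback(acc, elements[k]);
      }
      return acc;
    }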
@@ -1347,14 +1300,16 @@ Reduction JSCallReducer::ReduceArrayReduceRight(Handle<JSFunction> function,
common()->Phi(MachineRepresentation::kTagged, 2), k, k, loop);
Node* curloop = cur = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, 2), cur, cur, loop);
- checkpoint_params[2] = k;
- checkpoint_params[4] = curloop;
control = loop;
effect = eloop;
- Node* continue_test = graph()->NewNode(simplified()->NumberLessThanOrEqual(),
- jsgraph()->ZeroConstant(), k);
+ Node* continue_test =
+ left
+ ? graph()->NewNode(simplified()->NumberLessThan(), k, original_length)
+ : graph()->NewNode(simplified()->NumberLessThanOrEqual(),
+ jsgraph()->ZeroConstant(), k);
+
Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
continue_test, control);
@@ -1362,14 +1317,20 @@ Reduction JSCallReducer::ReduceArrayReduceRight(Handle<JSFunction> function,
Node* if_false = graph()->NewNode(common()->IfFalse(), continue_branch);
control = if_true;
- Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function,
- Builtins::kArrayReduceRightLoopEagerDeoptContinuation, node->InputAt(0),
- context, &checkpoint_params[0], stack_parameters, outer_frame_state,
- ContinuationFrameStateMode::EAGER);
-
- effect =
- graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+ {
+ Builtins::Name builtin_eager =
+ left ? Builtins::kArrayReduceLoopEagerDeoptContinuation
+ : Builtins::kArrayReduceRightLoopEagerDeoptContinuation;
+ const std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, k, original_length, curloop});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), shared, builtin_eager, node->InputAt(0), context,
+ checkpoint_params.data(), stack_parameters, outer_frame_state,
+ ContinuationFrameStateMode::EAGER);
+ effect =
+ graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+ }
// Make sure the map hasn't changed during the iteration
effect = graph()->NewNode(
@@ -1379,9 +1340,7 @@ Reduction JSCallReducer::ReduceArrayReduceRight(Handle<JSFunction> function,
Node* element =
SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
- Node* next_k = graph()->NewNode(simplified()->NumberSubtract(), k,
- jsgraph()->OneConstant());
- checkpoint_params[2] = next_k;
+ Node* next_k = graph()->NewNode(next_op, k, jsgraph()->OneConstant());
Node* hole_true = nullptr;
Node* hole_false = nullptr;
@@ -1390,10 +1349,8 @@ Reduction JSCallReducer::ReduceArrayReduceRight(Handle<JSFunction> function,
if (IsHoleyElementsKind(kind)) {
// Holey elements kinds require a hole check and skipping of the element in
// the case of a hole.
- Node* check = graph()->NewNode(simplified()->ReferenceEqual(), element,
- jsgraph()->TheHoleConstant());
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ hole_check(element), control);
hole_true = graph()->NewNode(common()->IfTrue(), branch);
hole_false = graph()->NewNode(common()->IfFalse(), branch);
control = hole_false;
@@ -1405,15 +1362,24 @@ Reduction JSCallReducer::ReduceArrayReduceRight(Handle<JSFunction> function,
common()->TypeGuard(Type::NonInternal()), element, effect, control);
}
- frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayReduceRightLoopLazyDeoptContinuation,
- node->InputAt(0), context, &checkpoint_params[0], stack_parameters - 1,
- outer_frame_state, ContinuationFrameStateMode::LAZY);
+ Node* next_cur;
+ {
+ Builtins::Name builtin_lazy =
+ left ? Builtins::kArrayReduceLoopLazyDeoptContinuation
+ : Builtins::kArrayReduceRightLoopLazyDeoptContinuation;
+ const std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, next_k, original_length, curloop});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), shared, builtin_lazy, node->InputAt(0), context,
+ checkpoint_params.data(), stack_parameters - 1, outer_frame_state,
+ ContinuationFrameStateMode::LAZY);
- Node* next_cur = control = effect =
- graph()->NewNode(javascript()->Call(6, p.frequency()), fncallback,
- jsgraph()->UndefinedConstant(), cur, element, k,
- receiver, context, frame_state, effect, control);
+ next_cur = control = effect =
+ graph()->NewNode(javascript()->Call(6, p.frequency()), fncallback,
+ jsgraph()->UndefinedConstant(), cur, element, k,
+ receiver, context, frame_state, effect, control);
+ }
// Rewire potential exception edges.
Node* on_exception = nullptr;
@@ -1455,23 +1421,12 @@ Reduction JSCallReducer::ReduceArrayReduceRight(Handle<JSFunction> function,
graph()->NewNode(common()->Throw(), check_throw, check_fail);
NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
- if (node->op()->ValueInputCount() <= 3) {
- // Wire up the branch for the case when an array is empty.
- // Since {check_throw} is an unconditional throw, it's impossible to
- // return a successful completion. Therefore, we simply connect the
- // successful completion to the graph end.
- Node* throw_node =
- graph()->NewNode(common()->Throw(), initial_element_check_throw,
- initial_element_check_fail);
- NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
- }
-
ReplaceWithValue(node, curloop, effect, control);
return Replace(curloop);
-} // namespace compiler
+}
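
[Editorial aside, not part of this diff: a minimal standalone C++ sketch of the behavior the reduction above lowers for Array.prototype.reduce/reduceRight without an initial accumulator. A vector of optional doubles stands in for a holey backing store, and a thrown exception stands in for the kNoInitialElement deopt plus the "no initial value" TypeError; names here are illustrative only.]

// Illustrative only -- not part of the V8 sources.
#include <functional>
#include <optional>
#include <stdexcept>
#include <vector>

double ReduceWithoutInitial(const std::vector<std::optional<double>>& elems,
                            bool left,
                            const std::function<double(double, double)>& f) {
  int size = static_cast<int>(elems.size());
  int k = left ? 0 : size - 1;
  const int step = left ? 1 : -1;
  // Pre-loop: find the first (or last) non-hole element to use as accumulator.
  while (k >= 0 && k < size && !elems[k]) k += step;
  if (k < 0 || k >= size)
    throw std::runtime_error("Reduce of empty array with no initial value");
  double acc = *elems[k];
  // Main loop: skip holes, apply the callback to every other element.
  for (k += step; k >= 0 && k < size; k += step) {
    if (elems[k]) acc = f(acc, *elems[k]);
  }
  return acc;
}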
-Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
- Node* node) {
+Reduction JSCallReducer::ReduceArrayMap(Node* node,
+ Handle<SharedFunctionInfo> shared) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
@@ -1498,7 +1453,7 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
// Ensure that any changes to the Array species constructor cause deopt.
- if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
+ if (!isolate()->IsSpeciesLookupChainIntact()) return NoChange();
const ElementsKind kind = receiver_maps[0]->elements_kind();
@@ -1547,7 +1502,7 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
// Check whether the given callback function is callable. Note that this has
// to happen outside the loop to make sure we also throw on empty arrays.
Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayMapLoopLazyDeoptContinuation,
+ jsgraph(), shared, Builtins::kArrayMapLoopLazyDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::LAZY);
Node* check_fail = nullptr;
@@ -1570,7 +1525,7 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
control = if_true;
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayMapLoopEagerDeoptContinuation,
+ jsgraph(), shared, Builtins::kArrayMapLoopEagerDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::EAGER);
@@ -1619,7 +1574,7 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
// This frame state is dealt with by hand in
// ArrayMapLoopLazyDeoptContinuation.
frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayMapLoopLazyDeoptContinuation,
+ jsgraph(), shared, Builtins::kArrayMapLoopLazyDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::LAZY);
@@ -1671,8 +1626,8 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
return Replace(a);
}
-Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
- Node* node) {
+Reduction JSCallReducer::ReduceArrayFilter(Node* node,
+ Handle<SharedFunctionInfo> shared) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
@@ -1698,7 +1653,7 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
// And ensure that any changes to the Array species constructor cause deopt.
- if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
+ if (!isolate()->IsSpeciesLookupChainIntact()) return NoChange();
const ElementsKind kind = receiver_maps[0]->elements_kind();
// The output array is packed (filter doesn't visit holes).
@@ -1766,7 +1721,7 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
const int stack_parameters = static_cast<int>(checkpoint_params.size());
Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayFilterLoopLazyDeoptContinuation,
+ jsgraph(), shared, Builtins::kArrayFilterLoopLazyDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::LAZY);
WireInCallbackIsCallableCheck(fncallback, context, check_frame_state,
@@ -1794,7 +1749,7 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
const int stack_parameters = static_cast<int>(checkpoint_params.size());
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayFilterLoopEagerDeoptContinuation,
+ jsgraph(), shared, Builtins::kArrayFilterLoopEagerDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::EAGER);
@@ -1851,7 +1806,7 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
const int stack_parameters = static_cast<int>(checkpoint_params.size());
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayFilterLoopLazyDeoptContinuation,
+ jsgraph(), shared, Builtins::kArrayFilterLoopLazyDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::LAZY);
@@ -1880,7 +1835,7 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
callback_value});
const int stack_parameters = static_cast<int>(checkpoint_params.size());
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayFilterLoopLazyDeoptContinuation,
+ jsgraph(), shared, Builtins::kArrayFilterLoopLazyDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::EAGER);
@@ -1926,9 +1881,8 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
return Replace(a);
}
-Reduction JSCallReducer::ReduceArrayFind(ArrayFindVariant variant,
- Handle<JSFunction> function,
- Node* node) {
+Reduction JSCallReducer::ReduceArrayFind(Node* node, ArrayFindVariant variant,
+ Handle<SharedFunctionInfo> shared) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
@@ -2014,8 +1968,8 @@ Reduction JSCallReducer::ReduceArrayFind(ArrayFindVariant variant,
Node* check_throw = nullptr;
{
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, lazy_continuation_builtin, node->InputAt(0),
- context, &checkpoint_params[0], stack_parameters, outer_frame_state,
+ jsgraph(), shared, lazy_continuation_builtin, node->InputAt(0), context,
+ &checkpoint_params[0], stack_parameters, outer_frame_state,
ContinuationFrameStateMode::LAZY);
WireInCallbackIsCallableCheck(fncallback, context, frame_state, effect,
&control, &check_fail, &check_throw);
@@ -2040,7 +1994,7 @@ Reduction JSCallReducer::ReduceArrayFind(ArrayFindVariant variant,
// Check the map hasn't changed during the iteration.
{
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, eager_continuation_builtin, node->InputAt(0),
+ jsgraph(), shared, eager_continuation_builtin, node->InputAt(0),
context, &checkpoint_params[0], stack_parameters, outer_frame_state,
ContinuationFrameStateMode::EAGER);
@@ -2083,7 +2037,7 @@ Reduction JSCallReducer::ReduceArrayFind(ArrayFindVariant variant,
static_cast<int>(call_checkpoint_params.size());
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, after_callback_lazy_continuation_builtin,
+ jsgraph(), shared, after_callback_lazy_continuation_builtin,
node->InputAt(0), context, &call_checkpoint_params[0],
call_stack_parameters, outer_frame_state,
ContinuationFrameStateMode::LAZY);
@@ -2264,8 +2218,8 @@ Node* JSCallReducer::SafeLoadElement(ElementsKind kind, Node* receiver,
return element;
}
-Reduction JSCallReducer::ReduceArrayEvery(Handle<JSFunction> function,
- Node* node) {
+Reduction JSCallReducer::ReduceArrayEvery(Node* node,
+ Handle<SharedFunctionInfo> shared) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
@@ -2291,7 +2245,7 @@ Reduction JSCallReducer::ReduceArrayEvery(Handle<JSFunction> function,
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
// And ensure that any changes to the Array species constructor cause deopt.
- if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
+ if (!isolate()->IsSpeciesLookupChainIntact()) return NoChange();
const ElementsKind kind = receiver_maps[0]->elements_kind();
@@ -2336,7 +2290,7 @@ Reduction JSCallReducer::ReduceArrayEvery(Handle<JSFunction> function,
const int stack_parameters = static_cast<int>(checkpoint_params.size());
Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayEveryLoopLazyDeoptContinuation,
+ jsgraph(), shared, Builtins::kArrayEveryLoopLazyDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::LAZY);
WireInCallbackIsCallableCheck(fncallback, context, check_frame_state,
@@ -2362,7 +2316,7 @@ Reduction JSCallReducer::ReduceArrayEvery(Handle<JSFunction> function,
const int stack_parameters = static_cast<int>(checkpoint_params.size());
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayEveryLoopEagerDeoptContinuation,
+ jsgraph(), shared, Builtins::kArrayEveryLoopEagerDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::EAGER);
@@ -2418,7 +2372,7 @@ Reduction JSCallReducer::ReduceArrayEvery(Handle<JSFunction> function,
const int stack_parameters = static_cast<int>(checkpoint_params.size());
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayEveryLoopLazyDeoptContinuation,
+ jsgraph(), shared, Builtins::kArrayEveryLoopLazyDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::LAZY);
@@ -2484,8 +2438,8 @@ Reduction JSCallReducer::ReduceArrayEvery(Handle<JSFunction> function,
return Replace(return_value);
}
-Reduction JSCallReducer::ReduceArraySome(Handle<JSFunction> function,
- Node* node) {
+Reduction JSCallReducer::ReduceArraySome(Node* node,
+ Handle<SharedFunctionInfo> shared) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
@@ -2511,17 +2465,12 @@ Reduction JSCallReducer::ReduceArraySome(Handle<JSFunction> function,
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
// And ensure that any changes to the Array species constructor cause deopt.
- if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
+ if (!isolate()->IsSpeciesLookupChainIntact()) return NoChange();
if (receiver_maps.size() == 0) return NoChange();
const ElementsKind kind = receiver_maps[0]->elements_kind();
- // TODO(pwong): Handle holey double elements kinds.
- if (IsDoubleElementsKind(kind) && IsHoleyElementsKind(kind)) {
- return NoChange();
- }
-
for (Handle<Map> receiver_map : receiver_maps) {
if (!CanInlineArrayIteratingBuiltin(receiver_map)) return NoChange();
// We can handle different maps, as long as their elements kind are the
@@ -2563,7 +2512,7 @@ Reduction JSCallReducer::ReduceArraySome(Handle<JSFunction> function,
const int stack_parameters = static_cast<int>(checkpoint_params.size());
Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArraySomeLoopLazyDeoptContinuation,
+ jsgraph(), shared, Builtins::kArraySomeLoopLazyDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::LAZY);
WireInCallbackIsCallableCheck(fncallback, context, check_frame_state,
@@ -2594,7 +2543,7 @@ Reduction JSCallReducer::ReduceArraySome(Handle<JSFunction> function,
const int stack_parameters = static_cast<int>(checkpoint_params.size());
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArraySomeLoopEagerDeoptContinuation,
+ jsgraph(), shared, Builtins::kArraySomeLoopEagerDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::EAGER);
@@ -2621,8 +2570,13 @@ Reduction JSCallReducer::ReduceArraySome(Handle<JSFunction> function,
if (IsHoleyElementsKind(kind)) {
// Holey elements kinds require a hole check and skipping of the element in
// the case of a hole.
- Node* check = graph()->NewNode(simplified()->ReferenceEqual(), element,
- jsgraph()->TheHoleConstant());
+ Node* check;
+ if (IsDoubleElementsKind(kind)) {
+ check = graph()->NewNode(simplified()->NumberIsFloat64Hole(), element);
+ } else {
+ check = graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant());
+ }
Node* branch =
graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
hole_true = graph()->NewNode(common()->IfTrue(), branch);
@@ -2645,7 +2599,7 @@ Reduction JSCallReducer::ReduceArraySome(Handle<JSFunction> function,
const int stack_parameters = static_cast<int>(checkpoint_params.size());
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArraySomeLoopLazyDeoptContinuation,
+ jsgraph(), shared, Builtins::kArraySomeLoopLazyDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::LAZY);
@@ -2713,19 +2667,20 @@ Reduction JSCallReducer::ReduceArraySome(Handle<JSFunction> function,
return Replace(return_value);
}
-Reduction JSCallReducer::ReduceCallApiFunction(Node* node,
- Handle<JSFunction> function) {
+Reduction JSCallReducer::ReduceCallApiFunction(
+ Node* node, Handle<SharedFunctionInfo> shared) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
int const argc = static_cast<int>(p.arity()) - 2;
+ Node* target = NodeProperties::GetValueInput(node, 0);
Node* receiver = (p.convert_mode() == ConvertReceiverMode::kNullOrUndefined)
? jsgraph()->HeapConstant(global_proxy())
: NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
Handle<FunctionTemplateInfo> function_template_info(
- FunctionTemplateInfo::cast(function->shared()->function_data()));
- Handle<Context> context(function->context());
+ FunctionTemplateInfo::cast(shared->function_data()));
// CallApiCallbackStub expects the target in a register, so we count it out,
// and counts the receiver as an implicit argument, so we count the receiver
@@ -2774,6 +2729,11 @@ Reduction JSCallReducer::ReduceCallApiFunction(Node* node,
}
}
+ // Load the {target}s context.
+ Node* context = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSFunctionContext()), target,
+ effect, control);
+
// CallApiCallbackStub's register arguments: code, target, call data, holder,
// function address.
// TODO(turbofan): Consider introducing a JSCallApiCallback operator for
@@ -2784,7 +2744,7 @@ Reduction JSCallReducer::ReduceCallApiFunction(Node* node,
Handle<Object> data(call_handler_info->data(), isolate());
CallApiCallbackStub stub(isolate(), argc);
CallInterfaceDescriptor cid = stub.GetCallInterfaceDescriptor();
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), cid,
cid.GetStackParameterCount() + argc + 1 /* implicit receiver */,
CallDescriptor::kNeedsFrameState, Operator::kNoProperties,
@@ -2797,14 +2757,14 @@ Reduction JSCallReducer::ReduceCallApiFunction(Node* node,
&api_function, ExternalReference::DIRECT_API_CALL, isolate());
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(stub.GetCode()));
- node->ReplaceInput(1, jsgraph()->Constant(context));
+ node->ReplaceInput(1, context);
node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(data));
node->InsertInput(graph()->zone(), 3, holder);
node->InsertInput(graph()->zone(), 4,
jsgraph()->ExternalConstant(function_reference));
node->ReplaceInput(5, receiver);
- // Remove context input.
- node->RemoveInput(6 + argc);
+ node->RemoveInput(6 + argc); // Remove context input.
+ node->ReplaceInput(7 + argc, effect); // Update effect input.
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
return Changed(node);
}
@@ -3082,93 +3042,11 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
if (m.HasValue()) {
if (m.Value()->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
- Handle<SharedFunctionInfo> shared(function->shared(), isolate());
-
- // Raise a TypeError if the {target} is a "classConstructor".
- if (IsClassConstructor(shared->kind())) {
- NodeProperties::ReplaceValueInputs(node, target);
- NodeProperties::ChangeOp(
- node, javascript()->CallRuntime(
- Runtime::kThrowConstructorNonCallableError, 1));
- return Changed(node);
- }
// Don't inline cross native context.
if (function->native_context() != *native_context()) return NoChange();
- // Check for known builtin functions.
- switch (shared->code()->builtin_index()) {
- case Builtins::kArrayConstructor:
- return ReduceArrayConstructor(node);
- case Builtins::kBooleanConstructor:
- return ReduceBooleanConstructor(node);
- case Builtins::kFunctionPrototypeApply:
- return ReduceFunctionPrototypeApply(node);
- case Builtins::kFastFunctionPrototypeBind:
- return ReduceFunctionPrototypeBind(node);
- case Builtins::kFunctionPrototypeCall:
- return ReduceFunctionPrototypeCall(node);
- case Builtins::kFunctionPrototypeHasInstance:
- return ReduceFunctionPrototypeHasInstance(node);
- case Builtins::kObjectConstructor:
- return ReduceObjectConstructor(node);
- case Builtins::kObjectGetPrototypeOf:
- return ReduceObjectGetPrototypeOf(node);
- case Builtins::kObjectIs:
- return ReduceObjectIs(node);
- case Builtins::kObjectPrototypeGetProto:
- return ReduceObjectPrototypeGetProto(node);
- case Builtins::kObjectPrototypeHasOwnProperty:
- return ReduceObjectPrototypeHasOwnProperty(node);
- case Builtins::kObjectPrototypeIsPrototypeOf:
- return ReduceObjectPrototypeIsPrototypeOf(node);
- case Builtins::kReflectApply:
- return ReduceReflectApply(node);
- case Builtins::kReflectConstruct:
- return ReduceReflectConstruct(node);
- case Builtins::kReflectGet:
- return ReduceReflectGet(node);
- case Builtins::kReflectGetPrototypeOf:
- return ReduceReflectGetPrototypeOf(node);
- case Builtins::kReflectHas:
- return ReduceReflectHas(node);
- case Builtins::kArrayForEach:
- return ReduceArrayForEach(function, node);
- case Builtins::kArrayMap:
- return ReduceArrayMap(function, node);
- case Builtins::kArrayFilter:
- return ReduceArrayFilter(function, node);
- case Builtins::kArrayReduce:
- return ReduceArrayReduce(function, node);
- case Builtins::kArrayReduceRight:
- return ReduceArrayReduceRight(function, node);
- case Builtins::kArrayPrototypeFind:
- return ReduceArrayFind(ArrayFindVariant::kFind, function, node);
- case Builtins::kArrayPrototypeFindIndex:
- return ReduceArrayFind(ArrayFindVariant::kFindIndex, function, node);
- case Builtins::kArrayEvery:
- return ReduceArrayEvery(function, node);
- case Builtins::kArrayPrototypePush:
- return ReduceArrayPrototypePush(node);
- case Builtins::kArrayPrototypePop:
- return ReduceArrayPrototypePop(node);
- case Builtins::kArrayPrototypeShift:
- return ReduceArrayPrototypeShift(node);
- case Builtins::kReturnReceiver:
- return ReduceReturnReceiver(node);
- case Builtins::kStringPrototypeIndexOf:
- return ReduceStringPrototypeIndexOf(function, node);
- case Builtins::kStringPrototypeCharAt:
- return ReduceStringPrototypeCharAt(node);
- case Builtins::kStringPrototypeCharCodeAt:
- return ReduceStringPrototypeCharCodeAt(node);
- default:
- break;
- }
-
- if (!FLAG_runtime_stats && shared->IsApiFunction()) {
- return ReduceCallApiFunction(node, function);
- }
+ return ReduceJSCall(node, handle(function->shared(), isolate()));
} else if (m.Value()->IsJSBoundFunction()) {
Handle<JSBoundFunction> function =
Handle<JSBoundFunction>::cast(m.Value());
@@ -3206,6 +3084,15 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return NoChange();
}
+ // If {target} is the result of a JSCreateClosure operation, we can
+ // just immediately try to inline based on the SharedFunctionInfo,
+ // since TurboFan generally doesn't inline cross-context, and hence
+ // the {target} must have the same native context as the call site.
+ if (target->opcode() == IrOpcode::kJSCreateClosure) {
+ CreateClosureParameters const& p = CreateClosureParametersOf(target->op());
+ return ReduceJSCall(node, p.shared_info());
+ }
+
// If {target} is the result of a JSCreateBoundFunction operation,
// we can just fold the construction and call the bound target
// function directly instead.
@@ -3240,9 +3127,9 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return reduction.Changed() ? reduction : Changed(node);
}
- // Extract feedback from the {node} using the CallICNexus.
+ // Extract feedback from the {node} using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
if (nexus.IsUninitialized()) {
if (flags() & kBailoutOnUninitialized) {
// Introduce a SOFT deopt if the call {node} wasn't executed so far.
@@ -3281,6 +3168,209 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return NoChange();
}
+Reduction JSCallReducer::ReduceJSCall(Node* node,
+ Handle<SharedFunctionInfo> shared) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* target = NodeProperties::GetValueInput(node, 0);
+
+ // Do not reduce calls to functions with break points.
+ if (shared->HasBreakInfo()) return NoChange();
+
+ // Raise a TypeError if the {target} is a "classConstructor".
+ if (IsClassConstructor(shared->kind())) {
+ NodeProperties::ReplaceValueInputs(node, target);
+ NodeProperties::ChangeOp(
+ node, javascript()->CallRuntime(
+ Runtime::kThrowConstructorNonCallableError, 1));
+ return Changed(node);
+ }
+
+ // Check for known builtin functions.
+ switch (shared->code()->builtin_index()) {
+ case Builtins::kArrayConstructor:
+ return ReduceArrayConstructor(node);
+ case Builtins::kBooleanConstructor:
+ return ReduceBooleanConstructor(node);
+ case Builtins::kFunctionPrototypeApply:
+ return ReduceFunctionPrototypeApply(node);
+ case Builtins::kFastFunctionPrototypeBind:
+ return ReduceFunctionPrototypeBind(node);
+ case Builtins::kFunctionPrototypeCall:
+ return ReduceFunctionPrototypeCall(node);
+ case Builtins::kFunctionPrototypeHasInstance:
+ return ReduceFunctionPrototypeHasInstance(node);
+ case Builtins::kObjectConstructor:
+ return ReduceObjectConstructor(node);
+ case Builtins::kObjectGetPrototypeOf:
+ return ReduceObjectGetPrototypeOf(node);
+ case Builtins::kObjectIs:
+ return ReduceObjectIs(node);
+ case Builtins::kObjectPrototypeGetProto:
+ return ReduceObjectPrototypeGetProto(node);
+ case Builtins::kObjectPrototypeHasOwnProperty:
+ return ReduceObjectPrototypeHasOwnProperty(node);
+ case Builtins::kObjectPrototypeIsPrototypeOf:
+ return ReduceObjectPrototypeIsPrototypeOf(node);
+ case Builtins::kReflectApply:
+ return ReduceReflectApply(node);
+ case Builtins::kReflectConstruct:
+ return ReduceReflectConstruct(node);
+ case Builtins::kReflectGet:
+ return ReduceReflectGet(node);
+ case Builtins::kReflectGetPrototypeOf:
+ return ReduceReflectGetPrototypeOf(node);
+ case Builtins::kReflectHas:
+ return ReduceReflectHas(node);
+ case Builtins::kArrayForEach:
+ return ReduceArrayForEach(node, shared);
+ case Builtins::kArrayMap:
+ return ReduceArrayMap(node, shared);
+ case Builtins::kArrayFilter:
+ return ReduceArrayFilter(node, shared);
+ case Builtins::kArrayReduce:
+ return ReduceArrayReduce(node, ArrayReduceDirection::kLeft, shared);
+ case Builtins::kArrayReduceRight:
+ return ReduceArrayReduce(node, ArrayReduceDirection::kRight, shared);
+ case Builtins::kArrayPrototypeFind:
+ return ReduceArrayFind(node, ArrayFindVariant::kFind, shared);
+ case Builtins::kArrayPrototypeFindIndex:
+ return ReduceArrayFind(node, ArrayFindVariant::kFindIndex, shared);
+ case Builtins::kArrayEvery:
+ return ReduceArrayEvery(node, shared);
+ case Builtins::kArraySome:
+ return ReduceArraySome(node, shared);
+ case Builtins::kArrayPrototypePush:
+ return ReduceArrayPrototypePush(node);
+ case Builtins::kArrayPrototypePop:
+ return ReduceArrayPrototypePop(node);
+ case Builtins::kArrayPrototypeShift:
+ return ReduceArrayPrototypeShift(node);
+ case Builtins::kMathAbs:
+ return ReduceMathUnary(node, simplified()->NumberAbs());
+ case Builtins::kMathAcos:
+ return ReduceMathUnary(node, simplified()->NumberAcos());
+ case Builtins::kMathAcosh:
+ return ReduceMathUnary(node, simplified()->NumberAcosh());
+ case Builtins::kMathAsin:
+ return ReduceMathUnary(node, simplified()->NumberAsin());
+ case Builtins::kMathAsinh:
+ return ReduceMathUnary(node, simplified()->NumberAsinh());
+ case Builtins::kMathAtan:
+ return ReduceMathUnary(node, simplified()->NumberAtan());
+ case Builtins::kMathAtanh:
+ return ReduceMathUnary(node, simplified()->NumberAtanh());
+ case Builtins::kMathCbrt:
+ return ReduceMathUnary(node, simplified()->NumberCbrt());
+ case Builtins::kMathCeil:
+ return ReduceMathUnary(node, simplified()->NumberCeil());
+ case Builtins::kMathCos:
+ return ReduceMathUnary(node, simplified()->NumberCos());
+ case Builtins::kMathCosh:
+ return ReduceMathUnary(node, simplified()->NumberCosh());
+ case Builtins::kMathExp:
+ return ReduceMathUnary(node, simplified()->NumberExp());
+ case Builtins::kMathExpm1:
+ return ReduceMathUnary(node, simplified()->NumberExpm1());
+ case Builtins::kMathFloor:
+ return ReduceMathUnary(node, simplified()->NumberFloor());
+ case Builtins::kMathFround:
+ return ReduceMathUnary(node, simplified()->NumberFround());
+ case Builtins::kMathLog:
+ return ReduceMathUnary(node, simplified()->NumberLog());
+ case Builtins::kMathLog1p:
+ return ReduceMathUnary(node, simplified()->NumberLog1p());
+ case Builtins::kMathLog10:
+ return ReduceMathUnary(node, simplified()->NumberLog10());
+ case Builtins::kMathLog2:
+ return ReduceMathUnary(node, simplified()->NumberLog2());
+ case Builtins::kMathRound:
+ return ReduceMathUnary(node, simplified()->NumberRound());
+ case Builtins::kMathSign:
+ return ReduceMathUnary(node, simplified()->NumberSign());
+ case Builtins::kMathSin:
+ return ReduceMathUnary(node, simplified()->NumberSin());
+ case Builtins::kMathSinh:
+ return ReduceMathUnary(node, simplified()->NumberSinh());
+ case Builtins::kMathSqrt:
+ return ReduceMathUnary(node, simplified()->NumberSqrt());
+ case Builtins::kMathTan:
+ return ReduceMathUnary(node, simplified()->NumberTan());
+ case Builtins::kMathTanh:
+ return ReduceMathUnary(node, simplified()->NumberTanh());
+ case Builtins::kMathTrunc:
+ return ReduceMathUnary(node, simplified()->NumberTrunc());
+ case Builtins::kMathAtan2:
+ return ReduceMathBinary(node, simplified()->NumberAtan2());
+ case Builtins::kMathPow:
+ return ReduceMathBinary(node, simplified()->NumberPow());
+ case Builtins::kMathClz32:
+ return ReduceMathClz32(node);
+ case Builtins::kMathImul:
+ return ReduceMathImul(node);
+ case Builtins::kMathMax:
+ return ReduceMathMinMax(node, simplified()->NumberMax(),
+ jsgraph()->Constant(-V8_INFINITY));
+ case Builtins::kMathMin:
+ return ReduceMathMinMax(node, simplified()->NumberMin(),
+ jsgraph()->Constant(V8_INFINITY));
+ case Builtins::kReturnReceiver:
+ return ReduceReturnReceiver(node);
+ case Builtins::kStringPrototypeIndexOf:
+ return ReduceStringPrototypeIndexOf(node);
+ case Builtins::kStringPrototypeCharAt:
+ return ReduceStringPrototypeStringAt(simplified()->StringCharAt(), node);
+ case Builtins::kStringPrototypeCharCodeAt:
+ return ReduceStringPrototypeStringAt(simplified()->StringCharCodeAt(),
+ node);
+ case Builtins::kStringPrototypeCodePointAt:
+ return ReduceStringPrototypeStringAt(
+ simplified()->StringCodePointAt(UnicodeEncoding::UTF32), node);
+ case Builtins::kStringPrototypeSubstring:
+ return ReduceStringPrototypeSubstring(node);
+ case Builtins::kStringPrototypeSlice:
+ return ReduceStringPrototypeSlice(node);
+#ifdef V8_INTL_SUPPORT
+ case Builtins::kStringPrototypeToLowerCaseIntl:
+ return ReduceStringPrototypeToLowerCaseIntl(node);
+ case Builtins::kStringPrototypeToUpperCaseIntl:
+ return ReduceStringPrototypeToUpperCaseIntl(node);
+#endif // V8_INTL_SUPPORT
+ case Builtins::kStringFromCharCode:
+ return ReduceStringFromCharCode(node);
+ case Builtins::kStringPrototypeIterator:
+ return ReduceStringPrototypeIterator(node);
+ case Builtins::kStringIteratorPrototypeNext:
+ return ReduceStringIteratorPrototypeNext(node);
+ case Builtins::kAsyncFunctionPromiseCreate:
+ return ReduceAsyncFunctionPromiseCreate(node);
+ case Builtins::kAsyncFunctionPromiseRelease:
+ return ReduceAsyncFunctionPromiseRelease(node);
+ case Builtins::kPromiseCapabilityDefaultReject:
+ return ReducePromiseCapabilityDefaultReject(node);
+ case Builtins::kPromiseCapabilityDefaultResolve:
+ return ReducePromiseCapabilityDefaultResolve(node);
+ case Builtins::kPromiseInternalConstructor:
+ return ReducePromiseInternalConstructor(node);
+ case Builtins::kPromiseInternalReject:
+ return ReducePromiseInternalReject(node);
+ case Builtins::kPromiseInternalResolve:
+ return ReducePromiseInternalResolve(node);
+ case Builtins::kPromisePrototypeCatch:
+ return ReducePromisePrototypeCatch(node);
+ case Builtins::kPromisePrototypeFinally:
+ return ReducePromisePrototypeFinally(node);
+ case Builtins::kPromisePrototypeThen:
+ return ReducePromisePrototypeThen(node);
+ default:
+ break;
+ }
+
+ if (!FLAG_runtime_stats && shared->IsApiFunction()) {
+ return ReduceCallApiFunction(node, shared);
+ }
+ return NoChange();
+}
+
Reduction JSCallReducer::ReduceJSCallWithArrayLike(Node* node) {
DCHECK_EQ(IrOpcode::kJSCallWithArrayLike, node->opcode());
CallFrequency frequency = CallFrequencyOf(node->op());
@@ -3310,9 +3400,9 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- // Extract feedback from the {node} using the CallICNexus.
+ // Extract feedback from the {node} using the FeedbackNexus.
if (p.feedback().IsValid()) {
- CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
if (nexus.IsUninitialized()) {
if (flags() & kBailoutOnUninitialized) {
// Introduce a SOFT deopt if the construct {node} wasn't executed so
@@ -3394,6 +3484,9 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
if (m.Value()->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
+ // Do not reduce constructors with break points.
+ if (function->shared()->HasBreakInfo()) return NoChange();
+
// Don't inline cross native context.
if (function->native_context() != *native_context()) return NoChange();
@@ -3431,6 +3524,11 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
return Changed(node);
}
}
+
+ // Check for the PromiseConstructor
+ if (*function == function->native_context()->promise_function()) {
+ return ReducePromiseConstructor(node);
+ }
} else if (m.Value()->IsJSBoundFunction()) {
Handle<JSBoundFunction> function =
Handle<JSBoundFunction>::cast(m.Value());
@@ -3516,10 +3614,8 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
return NoChange();
}
-// ES6 String.prototype.indexOf(searchString [, position])
-// #sec-string.prototype.indexof
-Reduction JSCallReducer::ReduceStringPrototypeIndexOf(
- Handle<JSFunction> function, Node* node) {
+// ES #sec-string.prototype.indexof
+Reduction JSCallReducer::ReduceStringPrototypeIndexOf(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
@@ -3557,6 +3653,171 @@ Reduction JSCallReducer::ReduceStringPrototypeIndexOf(
return NoChange();
}
+// ES #sec-string.prototype.substring
+Reduction JSCallReducer::ReduceStringPrototypeSubstring(Node* node) {
+ if (node->op()->ValueInputCount() < 3) return NoChange();
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* start = NodeProperties::GetValueInput(node, 2);
+ Node* end = node->op()->ValueInputCount() > 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+
+ receiver = effect = graph()->NewNode(simplified()->CheckString(p.feedback()),
+ receiver, effect, control);
+
+ start = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()), start,
+ effect, control);
+
+ Node* length = graph()->NewNode(simplified()->StringLength(), receiver);
+
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), end,
+ jsgraph()->UndefinedConstant());
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = length;
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse = efalse = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
+ end, efalse, if_false);
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ end = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+ Node* finalStart =
+ graph()->NewNode(simplified()->NumberMin(),
+ graph()->NewNode(simplified()->NumberMax(), start,
+ jsgraph()->ZeroConstant()),
+ length);
+ Node* finalEnd =
+ graph()->NewNode(simplified()->NumberMin(),
+ graph()->NewNode(simplified()->NumberMax(), end,
+ jsgraph()->ZeroConstant()),
+ length);
+
+ Node* from =
+ graph()->NewNode(simplified()->NumberMin(), finalStart, finalEnd);
+ Node* to = graph()->NewNode(simplified()->NumberMax(), finalStart, finalEnd);
+
+ Node* value = effect = graph()->NewNode(simplified()->StringSubstring(),
+ receiver, from, to, effect, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
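[Editorial aside, not part of this diff: the NumberMin/NumberMax nodes emitted above implement the substring index clamping from the spec. A small standalone C++ sketch of that arithmetic, with std::string and double standing in for JS strings and numbers; the undefined-end case, which defaults to the string length, is omitted.]

// Illustrative only -- not part of the V8 sources.
#include <algorithm>
#include <string>

std::string SubstringSemantics(const std::string& s, double start, double end) {
  const double length = static_cast<double>(s.size());
  // Clamp both indices into [0, length].
  const double final_start = std::min(std::max(start, 0.0), length);
  const double final_end = std::min(std::max(end, 0.0), length);
  // substring swaps the bounds if start > end.
  const double from = std::min(final_start, final_end);
  const double to = std::max(final_start, final_end);
  return s.substr(static_cast<size_t>(from), static_cast<size_t>(to - from));
}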
+// ES #sec-string.prototype.slice
+Reduction JSCallReducer::ReduceStringPrototypeSlice(Node* node) {
+ if (node->op()->ValueInputCount() < 3) return NoChange();
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* start = NodeProperties::GetValueInput(node, 2);
+ Node* end = node->op()->ValueInputCount() > 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+
+ receiver = effect = graph()->NewNode(simplified()->CheckString(p.feedback()),
+ receiver, effect, control);
+
+ start = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()), start,
+ effect, control);
+
+ Node* length = graph()->NewNode(simplified()->StringLength(), receiver);
+
+ // Replace {end} argument with {length} if it is undefined.
+ {
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), end,
+ jsgraph()->UndefinedConstant());
+
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = length;
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse = efalse = graph()->NewNode(
+ simplified()->CheckSmi(p.feedback()), end, efalse, if_false);
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ end = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+ }
+
+ Node* from = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+ graph()->NewNode(simplified()->NumberLessThan(), start,
+ jsgraph()->ZeroConstant()),
+ graph()->NewNode(
+ simplified()->NumberMax(),
+ graph()->NewNode(simplified()->NumberAdd(), length, start),
+ jsgraph()->ZeroConstant()),
+ graph()->NewNode(simplified()->NumberMin(), start, length));
+ // {from} is always in non-negative Smi range, but our typer cannot
+ // figure that out yet.
+ from = effect = graph()->NewNode(common()->TypeGuard(Type::UnsignedSmall()),
+ from, effect, control);
+
+ Node* to = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+ graph()->NewNode(simplified()->NumberLessThan(), end,
+ jsgraph()->ZeroConstant()),
+ graph()->NewNode(simplified()->NumberMax(),
+ graph()->NewNode(simplified()->NumberAdd(), length, end),
+ jsgraph()->ZeroConstant()),
+ graph()->NewNode(simplified()->NumberMin(), end, length));
+ // {to} is always in non-negative Smi range, but our typer cannot
+ // figure that out yet.
+ to = effect = graph()->NewNode(common()->TypeGuard(Type::UnsignedSmall()), to,
+ effect, control);
+
+ Node* result_string = nullptr;
+ // Return empty string if {from} is smaller than {to}.
+ {
+ Node* check = graph()->NewNode(simplified()->NumberLessThan(), from, to);
+
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = etrue = graph()->NewNode(simplified()->StringSubstring(),
+ receiver, from, to, etrue, if_true);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse = jsgraph()->EmptyStringConstant();
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ result_string =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+ }
+
+ ReplaceWithValue(node, result_string, effect, control);
+ return Replace(result_string);
+}
+
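[Editorial aside, not part of this diff: the two Select nodes above encode slice's handling of negative indices. A standalone C++ sketch of that mapping, again with std::string and double as stand-ins and the undefined-end default omitted.]

// Illustrative only -- not part of the V8 sources.
#include <algorithm>
#include <string>

std::string SliceSemantics(const std::string& s, double start, double end) {
  const double length = static_cast<double>(s.size());
  // Negative indices count from the end; positive ones are clamped to length.
  const double from =
      start < 0 ? std::max(length + start, 0.0) : std::min(start, length);
  const double to =
      end < 0 ? std::max(length + end, 0.0) : std::min(end, length);
  // An empty range yields the empty string.
  if (!(from < to)) return std::string();
  return s.substr(static_cast<size_t>(from), static_cast<size_t>(to - from));
}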
Reduction JSCallReducer::ReduceJSConstructWithArrayLike(Node* node) {
DCHECK_EQ(IrOpcode::kJSConstructWithArrayLike, node->opcode());
CallFrequency frequency = CallFrequencyOf(node->op());
@@ -3766,7 +4027,7 @@ Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) {
// once we got the hole NaN mess sorted out in TurboFan/V8.
if (receiver_map->elements_kind() == HOLEY_DOUBLE_ELEMENTS)
return NoChange();
- if (!UnionElementsKindUptoPackedness(&kind, receiver_map->elements_kind()))
+ if (!UnionElementsKindUptoSize(&kind, receiver_map->elements_kind()))
return NoChange();
}
@@ -3882,7 +4143,7 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
// once we got the hole NaN mess sorted out in TurboFan/V8.
if (receiver_map->elements_kind() == HOLEY_DOUBLE_ELEMENTS)
return NoChange();
- if (!UnionElementsKindUptoPackedness(&kind, receiver_map->elements_kind()))
+ if (!UnionElementsKindUptoSize(&kind, receiver_map->elements_kind()))
return NoChange();
}
@@ -4005,7 +4266,7 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
{
// Call the generic C++ implementation.
const int builtin_index = Builtins::kArrayShift;
- CallDescriptor const* const desc = Linkage::GetCEntryStubCallDescriptor(
+ auto call_descriptor = Linkage::GetCEntryStubCallDescriptor(
graph()->zone(), 1, BuiltinArguments::kNumExtraArgsWithReceiver,
Builtins::name(builtin_index), node->op()->properties(),
CallDescriptor::kNeedsFrameState);
@@ -4017,7 +4278,7 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
Node* argc =
jsgraph()->Constant(BuiltinArguments::kNumExtraArgsWithReceiver);
if_false1 = efalse1 = vfalse1 =
- graph()->NewNode(common()->Call(desc), stub_code, receiver,
+ graph()->NewNode(common()->Call(call_descriptor), stub_code, receiver,
jsgraph()->PaddingConstant(), argc, target,
jsgraph()->UndefinedConstant(), entry, argc, context,
frame_state, efalse1, if_false1);
@@ -4048,7 +4309,13 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
}
// ES6 section 21.1.3.1 String.prototype.charAt ( pos )
-Reduction JSCallReducer::ReduceStringPrototypeCharAt(Node* node) {
+// ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos )
+// ES6 section 21.1.3.3 String.prototype.codePointAt ( pos )
+Reduction JSCallReducer::ReduceStringPrototypeStringAt(
+ const Operator* string_access_operator, Node* node) {
+ DCHECK(string_access_operator->opcode() == IrOpcode::kStringCharAt ||
+ string_access_operator->opcode() == IrOpcode::kStringCharCodeAt ||
+ string_access_operator->opcode() == IrOpcode::kStringCodePointAt);
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
@@ -4056,112 +4323,919 @@ Reduction JSCallReducer::ReduceStringPrototypeCharAt(Node* node) {
}
Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* index = jsgraph()->ZeroConstant();
+ Node* index = node->op()->ValueInputCount() >= 3
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->ZeroConstant();
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ // Ensure that the {receiver} is actually a String.
receiver = effect = graph()->NewNode(simplified()->CheckString(p.feedback()),
receiver, effect, control);
- if (node->op()->ValueInputCount() >= 3) {
- index = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
- NodeProperties::GetValueInput(node, 2),
- effect, control);
- // Map -0 and NaN to 0 (as per ToInteger), and the values in
- // the [-2^31,-1] range to the [2^31,2^32-1] range, which will
- // be considered out-of-bounds as well, because of the maximal
- // String length limit in V8.
- STATIC_ASSERT(String::kMaxLength <= kMaxInt);
- index = graph()->NewNode(simplified()->NumberToUint32(), index);
- }
// Determine the {receiver} length.
Node* receiver_length =
graph()->NewNode(simplified()->StringLength(), receiver);
- // Check if {index} is less than {receiver} length.
- Node* check =
- graph()->NewNode(simplified()->NumberLessThan(), index, receiver_length);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ // Check that the {index} is within range.
+ index = effect = graph()->NewNode(simplified()->CheckBounds(p.feedback()),
+ index, receiver_length, effect, control);
// Return the character from the {receiver} as single character string.
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-
Node* masked_index = graph()->NewNode(simplified()->MaskIndexWithBound(),
index, receiver_length);
+ Node* value = effect = graph()->NewNode(string_access_operator, receiver,
+ masked_index, effect, control);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
+#ifdef V8_INTL_SUPPORT
+
+Reduction JSCallReducer::ReduceStringPrototypeToLowerCaseIntl(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ Node* receiver = effect =
+ graph()->NewNode(simplified()->CheckString(p.feedback()),
+ NodeProperties::GetValueInput(node, 1), effect, control);
+
+ NodeProperties::ReplaceEffectInput(node, effect);
+ RelaxEffectsAndControls(node);
+ node->ReplaceInput(0, receiver);
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->StringToLowerCaseIntl());
+ NodeProperties::SetType(node, Type::String());
+ return Changed(node);
+}
+
+Reduction JSCallReducer::ReduceStringPrototypeToUpperCaseIntl(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ Node* receiver = effect =
+ graph()->NewNode(simplified()->CheckString(p.feedback()),
+ NodeProperties::GetValueInput(node, 1), effect, control);
+
+ NodeProperties::ReplaceEffectInput(node, effect);
+ RelaxEffectsAndControls(node);
+ node->ReplaceInput(0, receiver);
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->StringToUpperCaseIntl());
+ NodeProperties::SetType(node, Type::String());
+ return Changed(node);
+}
+
+#endif // V8_INTL_SUPPORT
+
+// ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits )
+Reduction JSCallReducer::ReduceStringFromCharCode(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+ if (node->op()->ValueInputCount() == 3) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* input = NodeProperties::GetValueInput(node, 2);
+
+ input = effect = graph()->NewNode(
+ simplified()->SpeculativeToNumber(NumberOperationHint::kNumberOrOddball,
+ p.feedback()),
+ input, effect, control);
+
+ Node* value = graph()->NewNode(simplified()->StringFromCharCode(), input);
+ ReplaceWithValue(node, value, effect);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+Reduction JSCallReducer::ReduceStringPrototypeIterator(Node* node) {
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* receiver = effect =
+ graph()->NewNode(simplified()->CheckString(p.feedback()),
+ NodeProperties::GetValueInput(node, 1), effect, control);
+ Node* iterator = effect =
+ graph()->NewNode(javascript()->CreateStringIterator(), receiver,
+ jsgraph()->NoContextConstant(), effect);
+ ReplaceWithValue(node, iterator, effect, control);
+ return Replace(iterator);
+}
+
+Reduction JSCallReducer::ReduceStringIteratorPrototypeNext(Node* node) {
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ if (NodeProperties::HasInstanceTypeWitness(receiver, effect,
+ JS_STRING_ITERATOR_TYPE)) {
+ Node* string = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSStringIteratorString()),
+ receiver, effect, control);
+ Node* index = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSStringIteratorIndex()),
+ receiver, effect, control);
+ Node* length = graph()->NewNode(simplified()->StringLength(), string);
+
+ // branch0: if (index < length)
+ Node* check0 =
+ graph()->NewNode(simplified()->NumberLessThan(), index, length);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+ Node* etrue0 = effect;
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* done_true;
+ Node* vtrue0;
+ {
+ done_true = jsgraph()->FalseConstant();
+ Node* codepoint = etrue0 = graph()->NewNode(
+ simplified()->StringCodePointAt(UnicodeEncoding::UTF16), string,
+ index, etrue0, if_true0);
+ vtrue0 = graph()->NewNode(
+ simplified()->StringFromCodePoint(UnicodeEncoding::UTF16), codepoint);
+
+ // Update iterator.[[NextIndex]]
+ Node* char_length =
+ graph()->NewNode(simplified()->StringLength(), vtrue0);
+ index = graph()->NewNode(simplified()->NumberAdd(), index, char_length);
+ etrue0 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSStringIteratorIndex()),
+ receiver, index, etrue0, if_true0);
+ }
- Node* vtrue = graph()->NewNode(simplified()->StringCharAt(), receiver,
- masked_index, if_true);
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* done_false;
+ Node* vfalse0;
+ {
+ vfalse0 = jsgraph()->UndefinedConstant();
+ done_false = jsgraph()->TrueConstant();
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue0, effect, control);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue0, vfalse0, control);
+ Node* done =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ done_true, done_false, control);
+
+ value = effect = graph()->NewNode(javascript()->CreateIterResultObject(),
+ value, done, context, effect);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
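[Editorial aside, not part of this diff: the iterator lowering above reads one code point at [[NextIndex]], yields it as a string, and advances the index by the string's length, so surrogate pairs advance by two code units. A standalone C++ sketch over a UTF-16 string; the pair return value stands in for the {value, done} iterator result.]

// Illustrative only -- not part of the V8 sources.
#include <string>
#include <utility>

std::pair<std::u16string, bool> StringIteratorNext(const std::u16string& s,
                                                   size_t* index) {
  if (*index >= s.size()) return {u"", true};  // done: true
  char16_t lead = s[*index];
  size_t char_length = 1;
  // A lead surrogate followed by a trail surrogate is one code point
  // spanning two code units.
  if (lead >= 0xD800 && lead <= 0xDBFF && *index + 1 < s.size()) {
    char16_t trail = s[*index + 1];
    if (trail >= 0xDC00 && trail <= 0xDFFF) char_length = 2;
  }
  std::u16string value = s.substr(*index, char_length);
  *index += char_length;  // Update iterator.[[NextIndex]].
  return {value, false};
}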
+Reduction JSCallReducer::ReduceAsyncFunctionPromiseCreate(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ if (!isolate()->IsPromiseHookProtectorIntact()) return NoChange();
+
+ // Install a code dependency on the promise hook protector cell.
+ dependencies()->AssumePropertyCell(factory()->promise_hook_protector());
+
+ // Morph this {node} into a JSCreatePromise node.
+ RelaxControls(node);
+ node->ReplaceInput(0, context);
+ node->ReplaceInput(1, effect);
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, javascript()->CreatePromise());
+ return Changed(node);
+}
+
+Reduction JSCallReducer::ReduceAsyncFunctionPromiseRelease(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ if (!isolate()->IsPromiseHookProtectorIntact()) return NoChange();
+
+ // Install a code dependency on the promise hook protector cell.
+ dependencies()->AssumePropertyCell(factory()->promise_hook_protector());
+
+ // The AsyncFunctionPromiseRelease builtin is a no-op as long as the
+ // debugger is not active and no promise hook has ever been installed.
+ Node* value = jsgraph()->UndefinedConstant();
+ ReplaceWithValue(node, value);
+ return Replace(value);
+}
+
+// ES section #sec-promise-reject-functions
+Reduction JSCallReducer::ReducePromiseCapabilityDefaultReject(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Node* resolution = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // We need to execute in the {target}s context.
+ Node* context = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSFunctionContext()), target,
+ effect, control);
+
+ // Grab the promise closed over by {target}.
+ Node* promise = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForContextSlot(
+ PromiseBuiltinsAssembler::kPromiseSlot)),
+ context, effect, control);
+
+ // Check if the {promise} is still pending or already settled.
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), promise,
+ jsgraph()->UndefinedConstant());
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
- // Return the empty string otherwise.
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = jsgraph()->EmptyStringConstant();
+ Node* efalse = effect;
+ {
+ // Mark the {promise} as settled.
+ efalse = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForContextSlot(
+ PromiseBuiltinsAssembler::kPromiseSlot)),
+ context, jsgraph()->UndefinedConstant(), efalse, if_false);
+
+ // Check if we should emit a debug event.
+ Node* debug_event = efalse =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForContextSlot(
+ PromiseBuiltinsAssembler::kDebugEventSlot)),
+ context, efalse, if_false);
+
+ // Actually reject the {promise}.
+ efalse =
+ graph()->NewNode(javascript()->RejectPromise(), promise, resolution,
+ debug_event, context, frame_state, efalse, if_false);
+ }
control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* value = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ Node* value = jsgraph()->UndefinedConstant();
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
-// ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos )
-Reduction JSCallReducer::ReduceStringPrototypeCharCodeAt(Node* node) {
+// ES section #sec-promise-resolve-functions
+Reduction JSCallReducer::ReducePromiseCapabilityDefaultResolve(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Node* resolution = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+  // We need to execute in the {target}'s context.
+ Node* context = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSFunctionContext()), target,
+ effect, control);
+
+ // Grab the promise closed over by {target}.
+ Node* promise = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForContextSlot(
+ PromiseBuiltinsAssembler::kPromiseSlot)),
+ context, effect, control);
+
+ // Check if the {promise} is still pending or already settled.
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), promise,
+ jsgraph()->UndefinedConstant());
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ {
+ // Mark the {promise} as settled.
+ efalse = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForContextSlot(
+ PromiseBuiltinsAssembler::kPromiseSlot)),
+ context, jsgraph()->UndefinedConstant(), efalse, if_false);
+
+ // Actually resolve the {promise}.
+ efalse =
+ graph()->NewNode(javascript()->ResolvePromise(), promise, resolution,
+ context, frame_state, efalse, if_false);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+
+ Node* value = jsgraph()->UndefinedConstant();
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
+Node* JSCallReducer::CreateArtificialFrameState(
+ Node* node, Node* outer_frame_state, int parameter_count,
+ BailoutId bailout_id, FrameStateType frame_state_type,
+ Handle<SharedFunctionInfo> shared) {
+ const FrameStateFunctionInfo* state_info =
+ common()->CreateFrameStateFunctionInfo(frame_state_type,
+ parameter_count + 1, 0, shared);
+
+ const Operator* op = common()->FrameState(
+ bailout_id, OutputFrameStateCombine::Ignore(), state_info);
+ const Operator* op0 = common()->StateValues(0, SparseInputMask::Dense());
+ Node* node0 = graph()->NewNode(op0);
+ std::vector<Node*> params;
+ for (int parameter = 0; parameter < parameter_count + 1; ++parameter) {
+ params.push_back(node->InputAt(1 + parameter));
+ }
+ const Operator* op_param = common()->StateValues(
+ static_cast<int>(params.size()), SparseInputMask::Dense());
+ Node* params_node = graph()->NewNode(
+ op_param, static_cast<int>(params.size()), &params.front());
+ return graph()->NewNode(op, params_node, node0, node0,
+ jsgraph()->UndefinedConstant(), node->InputAt(0),
+ outer_frame_state);
+}
+
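Reading the final NewNode call in CreateArtificialFrameState above, the FrameState inputs follow the usual TurboFan order (a reading aid, derived from the call itself):

    //   params_node                    - collected parameters (StateValues)
    //   node0                          - empty StateValues standing in for locals
    //   node0                          - empty StateValues standing in for stack
    //   jsgraph()->UndefinedConstant() - context (none is needed here)
    //   node->InputAt(0)               - the closure (the call/construct target)
    //   outer_frame_state              - frame state of the surrounding function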
+Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSConstruct, node->opcode());
+ ConstructParameters const& p = ConstructParametersOf(node->op());
+ int arity = static_cast<int>(p.arity() - 2);
+ // We only inline when we have the executor.
+ if (arity < 1) return NoChange();
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Node* executor = NodeProperties::GetValueInput(node, 1);
+ Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
+
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ if (!FLAG_experimental_inline_promise_constructor) return NoChange();
+
+  // Only handle builtin Promises, not subclasses.
+ if (target != new_target) return NoChange();
+
+ // Add a code dependency on the promise hook protector.
+ dependencies()->AssumePropertyCell(factory()->promise_hook_protector());
+
+ Handle<SharedFunctionInfo> promise_shared(
+ handle(native_context()->promise_function()->shared()));
+
+ // Insert a construct stub frame into the chain of frame states. This will
+ // reconstruct the proper frame when deoptimizing within the constructor.
+  // For the frame state, we only provide the executor parameter, even if more
+  // arguments were passed. This is not observable from JS.
+ DCHECK_EQ(1, promise_shared->internal_formal_parameter_count());
+ Node* constructor_frame_state = CreateArtificialFrameState(
+ node, outer_frame_state, 1, BailoutId::ConstructStubInvoke(),
+ FrameStateType::kConstructStub, promise_shared);
+
+  // This frame state doesn't ever call the deopt continuation; it's only
+  // necessary to specify a continuation in order to handle the exceptional
+  // case.
+ Node* checkpoint_params[] = {jsgraph()->UndefinedConstant(),
+ jsgraph()->UndefinedConstant()};
+ const int stack_parameters = arraysize(checkpoint_params);
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), promise_shared,
+ Builtins::kPromiseConstructorLazyDeoptContinuation, target, context,
+ &checkpoint_params[0], stack_parameters, constructor_frame_state,
+ ContinuationFrameStateMode::LAZY);
+
+  // Check if the executor is callable.
+ Node* check_fail = nullptr;
+ Node* check_throw = nullptr;
+ // TODO(petermarshall): The frame state is wrong here.
+ WireInCallbackIsCallableCheck(executor, context, frame_state, effect,
+ &control, &check_fail, &check_throw);
+
+ // Create the resulting JSPromise.
+ Node* promise = effect =
+ graph()->NewNode(javascript()->CreatePromise(), context, effect);
+
+ // 8. CreatePromiseResolvingFunctions
+ // Allocate a promise context for the closures below.
+ Node* promise_context = effect = graph()->NewNode(
+ javascript()->CreateFunctionContext(
+ PromiseBuiltinsAssembler::kPromiseContextLength, FUNCTION_SCOPE),
+ context, context, effect, control);
+ effect =
+ graph()->NewNode(simplified()->StoreField(AccessBuilder::ForContextSlot(
+ PromiseBuiltinsAssembler::kPromiseSlot)),
+ promise_context, promise, effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForContextSlot(
+ PromiseBuiltinsAssembler::kDebugEventSlot)),
+ promise_context, jsgraph()->TrueConstant(), effect, control);
+
+ // Allocate the closure for the resolve case.
+ Handle<SharedFunctionInfo> resolve_shared(
+ native_context()->promise_capability_default_resolve_shared_fun(),
+ isolate());
+ Node* resolve = effect =
+ graph()->NewNode(javascript()->CreateClosure(
+ resolve_shared, factory()->many_closures_cell(),
+ handle(resolve_shared->code(), isolate())),
+ promise_context, effect, control);
+
+ // Allocate the closure for the reject case.
+ Handle<SharedFunctionInfo> reject_shared(
+ native_context()->promise_capability_default_reject_shared_fun(),
+ isolate());
+ Node* reject = effect =
+ graph()->NewNode(javascript()->CreateClosure(
+ reject_shared, factory()->many_closures_cell(),
+ handle(reject_shared->code(), isolate())),
+ promise_context, effect, control);
+
+ // Re-use the params from above, but actually set the promise parameter now.
+ checkpoint_params[1] = promise;
+
+ // This simple continuation just returns the created promise.
+ // TODO(petermarshall): If the executor function causes lazy deopt, and it
+ // also throws an exception, we should catch the exception and call the reject
+ // function.
+ frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), promise_shared,
+ Builtins::kPromiseConstructorLazyDeoptContinuation, target, context,
+ &checkpoint_params[0], stack_parameters, constructor_frame_state,
+ ContinuationFrameStateMode::LAZY);
+
+ // 9. Call executor with both resolving functions
+ effect = control = graph()->NewNode(
+ javascript()->Call(4, p.frequency(), VectorSlotPair(),
+ ConvertReceiverMode::kNullOrUndefined,
+ SpeculationMode::kDisallowSpeculation),
+ executor, jsgraph()->UndefinedConstant(), resolve, reject, context,
+ frame_state, effect, control);
+
+ Node* exception_effect = effect;
+ Node* exception_control = control;
+ {
+ Node* reason = exception_effect = exception_control = graph()->NewNode(
+ common()->IfException(), exception_control, exception_effect);
+ // 10a. Call reject if the call to executor threw.
+ exception_effect = exception_control = graph()->NewNode(
+ javascript()->Call(3, p.frequency(), VectorSlotPair(),
+ ConvertReceiverMode::kNullOrUndefined,
+ SpeculationMode::kDisallowSpeculation),
+ reject, jsgraph()->UndefinedConstant(), reason, context, frame_state,
+ exception_effect, exception_control);
+
+ // Rewire potential exception edges.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ RewirePostCallbackExceptionEdges(check_throw, on_exception,
+ exception_effect, &check_fail,
+ &exception_control);
+ }
+ }
+
+ Node* success_effect = effect;
+ Node* success_control = control;
+ {
+ success_control = graph()->NewNode(common()->IfSuccess(), success_control);
+ }
+
+ control =
+ graph()->NewNode(common()->Merge(2), success_control, exception_control);
+ effect = graph()->NewNode(common()->EffectPhi(2), success_effect,
+ exception_effect, control);
+
+ // Wire up the branch for the case when IsCallable fails for the executor.
+ // Since {check_throw} is an unconditional throw, it's impossible to
+ // return a successful completion. Therefore, we simply connect the successful
+ // completion to the graph end.
+ Node* throw_node =
+ graph()->NewNode(common()->Throw(), check_throw, check_fail);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+
+ ReplaceWithValue(node, promise, effect, control);
+ return Replace(promise);
+}
+
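The executor call in ReducePromiseConstructor above is wired with explicit success and exception projections before both paths are merged again. A condensed sketch of that wiring, mirroring the nodes created above ({call_op}, {callee}, {receiver} and {argument} are placeholders):

    // Sketch only: a JSCall that may throw produces value, effect and control;
    // the IfException node is simultaneously the thrown value, the effect and
    // the control of the exceptional path.
    Node* call = effect = control =
        graph()->NewNode(call_op, callee, receiver, argument, context,
                         frame_state, effect, control);
    Node* if_success = graph()->NewNode(common()->IfSuccess(), control);
    Node* if_exception =
        graph()->NewNode(common()->IfException(), control, effect);
    // The two paths are rejoined afterwards with Merge(2) and EffectPhi(2).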
+// V8 Extras: v8.createPromise(parent)
+Reduction JSCallReducer::ReducePromiseInternalConstructor(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+
+ // Check that promises aren't being observed through (debug) hooks.
+ if (!isolate()->IsPromiseHookProtectorIntact()) return NoChange();
+
+ // Install a code dependency on the promise hook protector cell.
+ dependencies()->AssumePropertyCell(factory()->promise_hook_protector());
+
+ // Create a new pending promise.
+ Node* value = effect =
+ graph()->NewNode(javascript()->CreatePromise(), context, effect);
+
+ ReplaceWithValue(node, value, effect);
+ return Replace(value);
+}
+
+// V8 Extras: v8.rejectPromise(promise, reason)
+Reduction JSCallReducer::ReducePromiseInternalReject(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* promise = node->op()->ValueInputCount() >= 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* reason = node->op()->ValueInputCount() >= 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+ Node* debug_event = jsgraph()->TrueConstant();
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Reject the {promise} using the given {reason}, and trigger debug logic.
+ Node* value = effect =
+ graph()->NewNode(javascript()->RejectPromise(), promise, reason,
+ debug_event, context, frame_state, effect, control);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
+// V8 Extras: v8.resolvePromise(promise, resolution)
+Reduction JSCallReducer::ReducePromiseInternalResolve(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* promise = node->op()->ValueInputCount() >= 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* resolution = node->op()->ValueInputCount() >= 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Resolve the {promise} using the given {resolution}.
+ Node* value = effect =
+ graph()->NewNode(javascript()->ResolvePromise(), promise, resolution,
+ context, frame_state, effect, control);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
+// ES section #sec-promise.prototype.catch
+Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
return NoChange();
}
+ int arity = static_cast<int>(p.arity() - 2);
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Check that the Promise.then protector is intact. This protector guards
+ // that all JSPromise instances whose [[Prototype]] is the initial
+ // %PromisePrototype% yield the initial %PromisePrototype%.then method
+ // when looking up "then".
+ if (!isolate()->IsPromiseThenLookupChainIntact()) return NoChange();
+ // Check if we know something about {receiver} already.
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+ DCHECK_NE(0, receiver_maps.size());
+
+ // Check whether all {receiver_maps} are JSPromise maps and
+ // have the initial Promise.prototype as their [[Prototype]].
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!receiver_map->IsJSPromiseMap()) return NoChange();
+ if (receiver_map->prototype() != native_context()->promise_prototype()) {
+ return NoChange();
+ }
+ }
+
+ // Add a code dependency on the necessary protectors.
+ dependencies()->AssumePropertyCell(factory()->promise_then_protector());
+
+ // If the {receiver_maps} aren't reliable, we need to repeat the
+ // map check here, guarded by the CALL_IC.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+  // Massage the {node} to call "then" instead by first removing all inputs
+  // following the onRejected parameter, and then padding the argument list
+  // from the left with undefined so that the call becomes
+  // then(undefined, onRejected).
+ Node* target = jsgraph()->Constant(handle(native_context()->promise_then()));
+ NodeProperties::ReplaceValueInput(node, target, 0);
+ NodeProperties::ReplaceEffectInput(node, effect);
+ for (; arity > 1; --arity) node->RemoveInput(3);
+ for (; arity < 2; ++arity) {
+ node->InsertInput(graph()->zone(), 2, jsgraph()->UndefinedConstant());
+ }
+ NodeProperties::ChangeOp(
+ node, javascript()->Call(2 + arity, p.frequency(), p.feedback(),
+ ConvertReceiverMode::kNotNullOrUndefined,
+ p.speculation_mode()));
+ Reduction const reduction = ReducePromisePrototypeThen(node);
+ return reduction.Changed() ? reduction : Changed(node);
+}
+
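ReducePromisePrototypeCatch above (and the .finally and Promise.resolve reductions below) reshuffles the node's inputs in place, which only reads naturally with the JSCall value-input layout in mind:

    //   value input 0    - call target (here replaced with the promise_then constant)
    //   value input 1    - receiver (the promise)
    //   value inputs 2.. - the JavaScript arguments (onFulfilled, onRejected, ...)
    // RemoveInput(3) therefore drops any surplus arguments after onRejected, and
    // InsertInput(zone, 2, UndefinedConstant()) shifts onRejected to slot 3,
    // turning promise.catch(onRejected) into promise.then(undefined, onRejected).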
+// ES section #sec-promise.prototype.finally
+Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ int arity = static_cast<int>(p.arity() - 2);
Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* index = jsgraph()->ZeroConstant();
+ Node* on_finally = arity >= 1 ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
- receiver = effect = graph()->NewNode(simplified()->CheckString(p.feedback()),
- receiver, effect, control);
- if (node->op()->ValueInputCount() >= 3) {
- index = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
- NodeProperties::GetValueInput(node, 2),
- effect, control);
+ // Check that promises aren't being observed through (debug) hooks.
+ if (!isolate()->IsPromiseHookProtectorIntact()) return NoChange();
- // Map -0 and NaN to 0 (as per ToInteger), and the values in
- // the [-2^31,-1] range to the [2^31,2^32-1] range, which will
- // be considered out-of-bounds as well, because of the maximal
- // String length limit in V8.
- STATIC_ASSERT(String::kMaxLength <= kMaxInt);
- index = graph()->NewNode(simplified()->NumberToUint32(), index);
+ // Check that the Promise#then protector is intact. This protector guards
+ // that all JSPromise instances whose [[Prototype]] is the initial
+ // %PromisePrototype% yield the initial %PromisePrototype%.then method
+ // when looking up "then".
+ if (!isolate()->IsPromiseThenLookupChainIntact()) return NoChange();
+
+ // Also check that the @@species protector is intact, which guards the
+  // lookup of "constructor" on JSPromise instances, whose [[Prototype]] is
+ // the initial %PromisePrototype%, and the Symbol.species lookup on the
+ // %PromisePrototype%.
+ if (!isolate()->IsSpeciesLookupChainIntact()) return NoChange();
+
+ // Check if we know something about {receiver} already.
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+ DCHECK_NE(0, receiver_maps.size());
+
+ // Check whether all {receiver_maps} are JSPromise maps and
+ // have the initial Promise.prototype as their [[Prototype]].
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!receiver_map->IsJSPromiseMap()) return NoChange();
+ if (receiver_map->prototype() != native_context()->promise_prototype()) {
+ return NoChange();
+ }
}
- // Determine the {receiver} length.
- Node* receiver_length =
- graph()->NewNode(simplified()->StringLength(), receiver);
+ // Add a code dependency on the necessary protectors.
+ dependencies()->AssumePropertyCell(factory()->promise_hook_protector());
+ dependencies()->AssumePropertyCell(factory()->promise_then_protector());
+ dependencies()->AssumePropertyCell(factory()->species_protector());
- // Check if {index} is less than {receiver} length.
- Node* check =
- graph()->NewNode(simplified()->NumberLessThan(), index, receiver_length);
+ // If the {receiver_maps} aren't reliable, we need to repeat the
+ // map check here, guarded by the CALL_IC.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ // Check if {on_finally} is callable, and if so wrap it into appropriate
+ // closures that perform the finalization.
+ Node* check = graph()->NewNode(simplified()->ObjectIsCallable(), on_finally);
Node* branch =
graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
- // Load the character from the {receiver}.
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* catch_true;
+ Node* then_true;
+ {
+ Node* context = jsgraph()->HeapConstant(native_context());
+ Node* constructor = jsgraph()->HeapConstant(
+ handle(native_context()->promise_function(), isolate()));
+
+ // Allocate shared context for the closures below.
+ context = etrue = graph()->NewNode(
+ javascript()->CreateFunctionContext(
+ PromiseBuiltinsAssembler::kPromiseFinallyContextLength,
+ FUNCTION_SCOPE),
+ context, context, etrue, if_true);
+ etrue =
+ graph()->NewNode(simplified()->StoreField(AccessBuilder::ForContextSlot(
+ PromiseBuiltinsAssembler::kOnFinallySlot)),
+ context, on_finally, etrue, if_true);
+ etrue =
+ graph()->NewNode(simplified()->StoreField(AccessBuilder::ForContextSlot(
+ PromiseBuiltinsAssembler::kConstructorSlot)),
+ context, constructor, etrue, if_true);
+
+ // Allocate the closure for the reject case.
+ Handle<SharedFunctionInfo> catch_finally(
+ native_context()->promise_catch_finally_shared_fun(), isolate());
+ catch_true = etrue =
+ graph()->NewNode(javascript()->CreateClosure(
+ catch_finally, factory()->many_closures_cell(),
+ handle(catch_finally->code(), isolate())),
+ context, etrue, if_true);
+
+ // Allocate the closure for the fulfill case.
+ Handle<SharedFunctionInfo> then_finally(
+ native_context()->promise_then_finally_shared_fun(), isolate());
+ then_true = etrue =
+ graph()->NewNode(javascript()->CreateClosure(
+ then_finally, factory()->many_closures_cell(),
+ handle(then_finally->code(), isolate())),
+ context, etrue, if_true);
+ }
- Node* masked_index = graph()->NewNode(simplified()->MaskIndexWithBound(),
- index, receiver_length);
-
- Node* vtrue = graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
- masked_index, if_true);
-
- // Return NaN otherwise.
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = jsgraph()->NaNConstant();
+ Node* efalse = effect;
+ Node* catch_false = on_finally;
+ Node* then_false = on_finally;
control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* value = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ Node* catch_finally =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ catch_true, catch_false, control);
+ Node* then_finally =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ then_true, then_false, control);
+
+ // At this point we definitely know that {receiver} has one of the
+ // {receiver_maps}, so insert a MapGuard as a hint for the lowering
+ // of the call to "then" below.
+ effect = graph()->NewNode(simplified()->MapGuard(receiver_maps), receiver,
+ effect, control);
+
+  // Massage the {node} to call "then" instead by trimming the argument list
+  // to exactly two inputs and then wiring in the {then_finally} and
+  // {catch_finally} closures as the onFulfilled and onRejected parameters.
+ Node* target = jsgraph()->Constant(handle(native_context()->promise_then()));
+ NodeProperties::ReplaceValueInput(node, target, 0);
+ NodeProperties::ReplaceEffectInput(node, effect);
+ NodeProperties::ReplaceControlInput(node, control);
+ for (; arity > 2; --arity) node->RemoveInput(2);
+ for (; arity < 2; ++arity)
+ node->InsertInput(graph()->zone(), 2, then_finally);
+ node->ReplaceInput(2, then_finally);
+ node->ReplaceInput(3, catch_finally);
+ NodeProperties::ChangeOp(
+ node, javascript()->Call(2 + arity, p.frequency(), p.feedback(),
+ ConvertReceiverMode::kNotNullOrUndefined,
+ p.speculation_mode()));
+ Reduction const reduction = ReducePromisePrototypeThen(node);
+ return reduction.Changed() ? reduction : Changed(node);
+}
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
+Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* on_fulfilled = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* on_rejected = node->op()->ValueInputCount() > 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Check that promises aren't being observed through (debug) hooks.
+ if (!isolate()->IsPromiseHookProtectorIntact()) return NoChange();
+
+ // Check if the @@species protector is intact. The @@species protector
+ // guards the "constructor" lookup on all JSPromise instances and the
+ // initial Promise.prototype, as well as the Symbol.species lookup on
+ // the Promise constructor.
+ if (!isolate()->IsSpeciesLookupChainIntact()) return NoChange();
+
+ // Check if we know something about {receiver} already.
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult infer_receiver_maps_result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (infer_receiver_maps_result == NodeProperties::kNoReceiverMaps) {
+ return NoChange();
+ }
+ DCHECK_NE(0, receiver_maps.size());
+
+ // Check whether all {receiver_maps} are JSPromise maps and
+ // have the initial Promise.prototype as their [[Prototype]].
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!receiver_map->IsJSPromiseMap()) return NoChange();
+ if (receiver_map->prototype() != native_context()->promise_prototype()) {
+ return NoChange();
+ }
+ }
+
+ // Add a code dependency on the necessary protectors.
+ dependencies()->AssumePropertyCell(factory()->promise_hook_protector());
+ dependencies()->AssumePropertyCell(factory()->species_protector());
+
+ // If the {receiver_maps} aren't reliable, we need to repeat the
+ // map check here, guarded by the CALL_IC.
+ if (infer_receiver_maps_result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ // Check that {on_fulfilled} is callable.
+ on_fulfilled = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged, BranchHint::kTrue),
+ graph()->NewNode(simplified()->ObjectIsCallable(), on_fulfilled),
+ on_fulfilled, jsgraph()->UndefinedConstant());
+
+ // Check that {on_rejected} is callable.
+ on_rejected = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged, BranchHint::kTrue),
+ graph()->NewNode(simplified()->ObjectIsCallable(), on_rejected),
+ on_rejected, jsgraph()->UndefinedConstant());
+
+ // Create the resulting JSPromise.
+ Node* result = effect =
+ graph()->NewNode(javascript()->CreatePromise(), context, effect);
+
+ // Chain {result} onto {receiver}.
+ result = effect = graph()->NewNode(javascript()->PerformPromiseThen(),
+ receiver, on_fulfilled, on_rejected,
+ result, context, effect, control);
+ ReplaceWithValue(node, result, effect, control);
+ return Replace(result);
+}
+
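ReducePromisePrototypeThen coerces the two handlers with a Select node instead of a full branch diamond, since only a value (and no effect chain) depends on the callable check. A minimal sketch, with {callback} standing for either handler:

    // Sketch only: keep {callback} if it is callable, otherwise use undefined.
    Node* coerced = graph()->NewNode(
        common()->Select(MachineRepresentation::kTagged, BranchHint::kTrue),
        graph()->NewNode(simplified()->ObjectIsCallable(), callback), callback,
        jsgraph()->UndefinedConstant());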
+// ES section #sec-promise.resolve
+Reduction JSCallReducer::ReducePromiseResolveTrampoline(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* value = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Check if we know something about {receiver} already.
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult infer_receiver_maps_result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (infer_receiver_maps_result == NodeProperties::kNoReceiverMaps) {
+ return NoChange();
+ }
+ DCHECK_NE(0, receiver_maps.size());
+
+ // Only reduce when all {receiver_maps} are JSReceiver maps.
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!receiver_map->IsJSReceiverMap()) return NoChange();
+ }
+
+ // Morph the {node} into a JSPromiseResolve operation.
+ node->ReplaceInput(0, receiver);
+ node->ReplaceInput(1, value);
+ node->ReplaceInput(2, context);
+ node->ReplaceInput(3, frame_state);
+ node->ReplaceInput(4, effect);
+ node->ReplaceInput(5, control);
+ node->TrimInputCount(6);
+ NodeProperties::ChangeOp(node, javascript()->PromiseResolve());
+ return Changed(node);
}
Graph* JSCallReducer::graph() const { return jsgraph()->graph(); }
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index b2656b6be8..675cc6df83 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_JS_CALL_REDUCER_H_
#include "src/base/flags.h"
+#include "src/compiler/frame-states.h"
#include "src/compiler/graph-reducer.h"
#include "src/deoptimize-reason.h"
@@ -28,7 +29,7 @@ class SimplifiedOperatorBuilder;
// Performs strength reduction on {JSConstruct} and {JSCall} nodes,
// which might allow inlining or other optimizations to be performed afterwards.
-class JSCallReducer final : public AdvancedReducer {
+class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
public:
// Flags that control the mode of operation.
enum Flag { kNoFlags = 0u, kBailoutOnUninitialized = 1u << 0 };
@@ -54,7 +55,8 @@ class JSCallReducer final : public AdvancedReducer {
private:
Reduction ReduceArrayConstructor(Node* node);
Reduction ReduceBooleanConstructor(Node* node);
- Reduction ReduceCallApiFunction(Node* node, Handle<JSFunction> function);
+ Reduction ReduceCallApiFunction(Node* node,
+ Handle<SharedFunctionInfo> shared);
Reduction ReduceFunctionPrototypeApply(Node* node);
Reduction ReduceFunctionPrototypeBind(Node* node);
Reduction ReduceFunctionPrototypeCall(Node* node);
@@ -71,16 +73,17 @@ class JSCallReducer final : public AdvancedReducer {
Reduction ReduceReflectGet(Node* node);
Reduction ReduceReflectGetPrototypeOf(Node* node);
Reduction ReduceReflectHas(Node* node);
- Reduction ReduceArrayForEach(Handle<JSFunction> function, Node* node);
- Reduction ReduceArrayReduce(Handle<JSFunction> function, Node* node);
- Reduction ReduceArrayReduceRight(Handle<JSFunction> function, Node* node);
- Reduction ReduceArrayMap(Handle<JSFunction> function, Node* node);
- Reduction ReduceArrayFilter(Handle<JSFunction> function, Node* node);
- enum class ArrayFindVariant : uint8_t { kFind, kFindIndex };
- Reduction ReduceArrayFind(ArrayFindVariant variant,
- Handle<JSFunction> function, Node* node);
- Reduction ReduceArrayEvery(Handle<JSFunction> function, Node* node);
- Reduction ReduceArraySome(Handle<JSFunction> function, Node* node);
+ Reduction ReduceArrayForEach(Node* node, Handle<SharedFunctionInfo> shared);
+ enum class ArrayReduceDirection { kLeft, kRight };
+ Reduction ReduceArrayReduce(Node* node, ArrayReduceDirection direction,
+ Handle<SharedFunctionInfo> shared);
+ Reduction ReduceArrayMap(Node* node, Handle<SharedFunctionInfo> shared);
+ Reduction ReduceArrayFilter(Node* node, Handle<SharedFunctionInfo> shared);
+ enum class ArrayFindVariant { kFind, kFindIndex };
+ Reduction ReduceArrayFind(Node* node, ArrayFindVariant variant,
+ Handle<SharedFunctionInfo> shared);
+ Reduction ReduceArrayEvery(Node* node, Handle<SharedFunctionInfo> shared);
+ Reduction ReduceArraySome(Node* node, Handle<SharedFunctionInfo> shared);
Reduction ReduceArrayPrototypePush(Node* node);
Reduction ReduceArrayPrototypePop(Node* node);
Reduction ReduceArrayPrototypeShift(Node* node);
@@ -91,16 +94,46 @@ class JSCallReducer final : public AdvancedReducer {
Reduction ReduceJSConstructWithArrayLike(Node* node);
Reduction ReduceJSConstructWithSpread(Node* node);
Reduction ReduceJSCall(Node* node);
+ Reduction ReduceJSCall(Node* node, Handle<SharedFunctionInfo> shared);
Reduction ReduceJSCallWithArrayLike(Node* node);
Reduction ReduceJSCallWithSpread(Node* node);
Reduction ReduceReturnReceiver(Node* node);
- Reduction ReduceStringPrototypeIndexOf(Handle<JSFunction> function,
- Node* node);
- Reduction ReduceStringPrototypeCharAt(Node* node);
- Reduction ReduceStringPrototypeCharCodeAt(Node* node);
+ Reduction ReduceStringPrototypeIndexOf(Node* node);
+ Reduction ReduceStringPrototypeSubstring(Node* node);
+ Reduction ReduceStringPrototypeSlice(Node* node);
+ Reduction ReduceStringPrototypeStringAt(
+ const Operator* string_access_operator, Node* node);
+
+#ifdef V8_INTL_SUPPORT
+ Reduction ReduceStringPrototypeToLowerCaseIntl(Node* node);
+ Reduction ReduceStringPrototypeToUpperCaseIntl(Node* node);
+#endif // V8_INTL_SUPPORT
+
+ Reduction ReduceStringFromCharCode(Node* node);
+ Reduction ReduceStringPrototypeIterator(Node* node);
+ Reduction ReduceStringIteratorPrototypeNext(Node* node);
+
+ Reduction ReduceAsyncFunctionPromiseCreate(Node* node);
+ Reduction ReduceAsyncFunctionPromiseRelease(Node* node);
+ Reduction ReducePromiseCapabilityDefaultReject(Node* node);
+ Reduction ReducePromiseCapabilityDefaultResolve(Node* node);
+ Reduction ReducePromiseConstructor(Node* node);
+ Reduction ReducePromiseInternalConstructor(Node* node);
+ Reduction ReducePromiseInternalReject(Node* node);
+ Reduction ReducePromiseInternalResolve(Node* node);
+ Reduction ReducePromisePrototypeCatch(Node* node);
+ Reduction ReducePromisePrototypeFinally(Node* node);
+ Reduction ReducePromisePrototypeThen(Node* node);
+ Reduction ReducePromiseResolveTrampoline(Node* node);
Reduction ReduceSoftDeoptimize(Node* node, DeoptimizeReason reason);
+ Reduction ReduceMathUnary(Node* node, const Operator* op);
+ Reduction ReduceMathBinary(Node* node, const Operator* op);
+ Reduction ReduceMathImul(Node* node);
+ Reduction ReduceMathClz32(Node* node);
+ Reduction ReduceMathMinMax(Node* node, const Operator* op, Node* empty_value);
+
// Returns the updated {to} node, and updates control and effect along the
// way.
Node* DoFilterPostCallbackWork(ElementsKind kind, Node** control,
@@ -135,6 +168,11 @@ class JSCallReducer final : public AdvancedReducer {
Node** effect, Node** k,
const VectorSlotPair& feedback);
+ Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
+ int parameter_count, BailoutId bailout_id,
+ FrameStateType frame_state_type,
+ Handle<SharedFunctionInfo> shared);
+
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const;
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index d3b9ee4e70..f535b52a27 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -143,8 +143,12 @@ Reduction JSCreateLowering::Reduce(Node* node) {
return ReduceJSCreateClosure(node);
case IrOpcode::kJSCreateIterResultObject:
return ReduceJSCreateIterResultObject(node);
+ case IrOpcode::kJSCreateStringIterator:
+ return ReduceJSCreateStringIterator(node);
case IrOpcode::kJSCreateKeyValueArray:
return ReduceJSCreateKeyValueArray(node);
+ case IrOpcode::kJSCreatePromise:
+ return ReduceJSCreatePromise(node);
case IrOpcode::kJSCreateLiteralArray:
case IrOpcode::kJSCreateLiteralObject:
return ReduceJSCreateLiteralArrayOrObject(node);
@@ -346,6 +350,13 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
// whether there conceptually is an arguments adaptor frame in the call
// chain.
Node* const args_state = GetArgumentsFrameState(frame_state);
+ if (args_state->InputAt(kFrameStateParametersInput)->opcode() ==
+ IrOpcode::kDeadValue) {
+ // This protects against an incompletely propagated DeadValue node.
+ // If the FrameState has a DeadValue input, then this node will be
+ // pruned anyway.
+ return NoChange();
+ }
FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
// Prepare element backing store to be used by arguments object.
bool has_aliased_arguments = false;
@@ -379,6 +390,13 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
// whether there conceptually is an arguments adaptor frame in the call
// chain.
Node* const args_state = GetArgumentsFrameState(frame_state);
+ if (args_state->InputAt(kFrameStateParametersInput)->opcode() ==
+ IrOpcode::kDeadValue) {
+ // This protects against an incompletely propagated DeadValue node.
+ // If the FrameState has a DeadValue input, then this node will be
+ // pruned anyway.
+ return NoChange();
+ }
FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
// Prepare element backing store to be used by arguments object.
Node* const elements = AllocateArguments(effect, control, args_state);
@@ -408,6 +426,13 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
// whether there conceptually is an arguments adaptor frame in the call
// chain.
Node* const args_state = GetArgumentsFrameState(frame_state);
+ if (args_state->InputAt(kFrameStateParametersInput)->opcode() ==
+ IrOpcode::kDeadValue) {
+ // This protects against an incompletely propagated DeadValue node.
+ // If the FrameState has a DeadValue input, then this node will be
+ // pruned anyway.
+ return NoChange();
+ }
FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
// Prepare element backing store to be used by the rest array.
Node* const elements =
@@ -490,8 +515,8 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
if (initial_map->instance_type() == JS_ASYNC_GENERATOR_OBJECT_TYPE) {
a.Store(AccessBuilder::ForJSAsyncGeneratorObjectQueue(), undefined);
- a.Store(AccessBuilder::ForJSAsyncGeneratorObjectAwaitedPromise(),
- undefined);
+ a.Store(AccessBuilder::ForJSAsyncGeneratorObjectIsAwaiting(),
+ jsgraph()->ZeroConstant());
}
// Handle in-object properties, too.
@@ -682,37 +707,37 @@ Reduction JSCreateLowering::ReduceNewArrayToStubCall(
if (arity == 0) {
ArrayNoArgumentConstructorStub stub(isolate(), elements_kind,
override_mode);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(),
arity + 1, CallDescriptor::kNeedsFrameState, properties);
node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
node->InsertInput(graph()->zone(), 2, type_info);
node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
} else if (arity == 1) {
// Require elements kind to "go holey".
ArraySingleArgumentConstructorStub stub(
isolate(), GetHoleyElementsKind(elements_kind), override_mode);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(),
arity + 1, CallDescriptor::kNeedsFrameState, properties);
node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
node->InsertInput(graph()->zone(), 2, type_info);
node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
} else {
DCHECK_GT(arity, 1);
ArrayNArgumentsConstructorStub stub(isolate());
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(),
arity + 1, CallDescriptor::kNeedsFrameState);
node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
node->InsertInput(graph()->zone(), 2, type_info);
node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
return Changed(node);
}
@@ -896,6 +921,8 @@ Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateClosure, node->opcode());
CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
Handle<SharedFunctionInfo> shared = p.shared_info();
+ Handle<FeedbackCell> feedback_cell = p.feedback_cell();
+ Handle<Code> code = p.code();
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
@@ -903,45 +930,54 @@ Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
// Use inline allocation of closures only for instantiation sites that have
// seen more than one instantiation, this simplifies the generated code and
// also serves as a heuristic of which allocation sites benefit from it.
- FeedbackSlot slot(FeedbackVector::ToSlot(p.feedback().index()));
- Handle<Cell> vector_cell(Cell::cast(p.feedback().vector()->Get(slot)));
- if (vector_cell->map() == isolate()->heap()->many_closures_cell_map()) {
- Handle<Map> function_map(
- Map::cast(native_context()->get(shared->function_map_index())));
- Node* lazy_compile_builtin = jsgraph()->HeapConstant(
- handle(isolate()->builtins()->builtin(Builtins::kCompileLazy)));
- DCHECK(!function_map->IsInobjectSlackTrackingInProgress());
- DCHECK(!function_map->is_dictionary_map());
-
- // Emit code to allocate the JSFunction instance.
- STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
- AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(function_map->instance_size());
- a.Store(AccessBuilder::ForMap(), function_map);
- a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
- jsgraph()->EmptyFixedArrayConstant());
- a.Store(AccessBuilder::ForJSObjectElements(),
- jsgraph()->EmptyFixedArrayConstant());
- a.Store(AccessBuilder::ForJSFunctionSharedFunctionInfo(), shared);
- a.Store(AccessBuilder::ForJSFunctionContext(), context);
- a.Store(AccessBuilder::ForJSFunctionFeedbackVector(), vector_cell);
- a.Store(AccessBuilder::ForJSFunctionCode(), lazy_compile_builtin);
- STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
- if (function_map->has_prototype_slot()) {
- a.Store(AccessBuilder::ForJSFunctionPrototypeOrInitialMap(),
- jsgraph()->TheHoleConstant());
- STATIC_ASSERT(JSFunction::kSizeWithPrototype == 8 * kPointerSize);
- }
- for (int i = 0; i < function_map->GetInObjectProperties(); i++) {
- a.Store(AccessBuilder::ForJSObjectInObjectProperty(function_map, i),
- jsgraph()->UndefinedConstant());
- }
- RelaxControls(node);
- a.FinishAndChange(node);
- return Changed(node);
+ if (feedback_cell->map() != isolate()->heap()->many_closures_cell_map()) {
+ // The generic path can only create closures for user functions.
+ DCHECK_EQ(isolate()->builtins()->builtin(Builtins::kCompileLazy), *code);
+ return NoChange();
}
- return NoChange();
+ Handle<Map> function_map(
+ Map::cast(native_context()->get(shared->function_map_index())));
+ DCHECK(!function_map->IsInobjectSlackTrackingInProgress());
+ DCHECK(!function_map->is_dictionary_map());
+
+ // TODO(turbofan): We should use the pretenure flag from {p} here,
+ // but currently the heuristic in the parser works against us, as
+ // it marks closures like
+ //
+ // args[l] = function(...) { ... }
+ //
+  // for old-space allocation, which doesn't always make sense. For
+  // example, in the bluebird-parallel benchmark this is a core part
+  // of the *promisify* logic (see crbug.com/810132).
+ PretenureFlag pretenure = NOT_TENURED;
+
+ // Emit code to allocate the JSFunction instance.
+ STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.Allocate(function_map->instance_size(), pretenure, Type::Function());
+ a.Store(AccessBuilder::ForMap(), function_map);
+ a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSObjectElements(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSFunctionSharedFunctionInfo(), shared);
+ a.Store(AccessBuilder::ForJSFunctionContext(), context);
+ a.Store(AccessBuilder::ForJSFunctionFeedbackCell(), feedback_cell);
+ a.Store(AccessBuilder::ForJSFunctionCode(), code);
+ STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
+ if (function_map->has_prototype_slot()) {
+ a.Store(AccessBuilder::ForJSFunctionPrototypeOrInitialMap(),
+ jsgraph()->TheHoleConstant());
+ STATIC_ASSERT(JSFunction::kSizeWithPrototype == 8 * kPointerSize);
+ }
+ for (int i = 0; i < function_map->GetInObjectProperties(); i++) {
+ a.Store(AccessBuilder::ForJSObjectInObjectProperty(function_map, i),
+ jsgraph()->UndefinedConstant());
+ }
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
}
Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) {
@@ -968,6 +1004,28 @@ Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) {
return Changed(node);
}
+Reduction JSCreateLowering::ReduceJSCreateStringIterator(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateStringIterator, node->opcode());
+ Node* string = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+
+ Node* map = jsgraph()->HeapConstant(
+ handle(native_context()->string_iterator_map(), isolate()));
+ // Allocate new iterator and attach the iterator to this string.
+ AllocationBuilder a(jsgraph(), effect, graph()->start());
+ a.Allocate(JSStringIterator::kSize, NOT_TENURED, Type::OtherObject());
+ a.Store(AccessBuilder::ForMap(), map);
+ a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSObjectElements(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSStringIteratorString(), string);
+ a.Store(AccessBuilder::ForJSStringIteratorIndex(), jsgraph()->SmiConstant(0));
+ STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
+ a.FinishAndChange(node);
+ return Changed(node);
+}
+
Reduction JSCreateLowering::ReduceJSCreateKeyValueArray(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateKeyValueArray, node->opcode());
Node* key = NodeProperties::GetValueInput(node, 0);
@@ -998,6 +1056,34 @@ Reduction JSCreateLowering::ReduceJSCreateKeyValueArray(Node* node) {
return Changed(node);
}
+Reduction JSCreateLowering::ReduceJSCreatePromise(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreatePromise, node->opcode());
+ Node* effect = NodeProperties::GetEffectInput(node);
+
+ Handle<Map> promise_map(native_context()->promise_function()->initial_map());
+
+ AllocationBuilder a(jsgraph(), effect, graph()->start());
+ a.Allocate(promise_map->instance_size());
+ a.Store(AccessBuilder::ForMap(), promise_map);
+ a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSObjectElements(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSObjectOffset(JSPromise::kReactionsOrResultOffset),
+ jsgraph()->ZeroConstant());
+ STATIC_ASSERT(v8::Promise::kPending == 0);
+ a.Store(AccessBuilder::ForJSObjectOffset(JSPromise::kFlagsOffset),
+ jsgraph()->ZeroConstant());
+ STATIC_ASSERT(JSPromise::kSize == 5 * kPointerSize);
+ for (int i = 0; i < v8::Promise::kEmbedderFieldCount; ++i) {
+ a.Store(
+ AccessBuilder::ForJSObjectOffset(JSPromise::kSize + i * kPointerSize),
+ jsgraph()->ZeroConstant());
+ }
+ a.FinishAndChange(node);
+ return Changed(node);
+}
+
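ReduceJSCreateStringIterator and ReduceJSCreatePromise above follow the same AllocationBuilder recipe for inline allocation. A condensed sketch using only the calls visible in this hunk ({map} stands for the relevant instance map):

    // Sketch only: every field is initialized before FinishAndChange exposes
    // the object by morphing {node} into the finished allocation.
    AllocationBuilder a(jsgraph(), effect, graph()->start());
    a.Allocate(map->instance_size());
    a.Store(AccessBuilder::ForMap(), map);
    a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
            jsgraph()->EmptyFixedArrayConstant());
    a.Store(AccessBuilder::ForJSObjectElements(),
            jsgraph()->EmptyFixedArrayConstant());
    // ... type-specific fields ...
    a.FinishAndChange(node);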
Reduction JSCreateLowering::ReduceJSCreateLiteralArrayOrObject(Node* node) {
DCHECK(node->opcode() == IrOpcode::kJSCreateLiteralArray ||
node->opcode() == IrOpcode::kJSCreateLiteralObject);
diff --git a/deps/v8/src/compiler/js-create-lowering.h b/deps/v8/src/compiler/js-create-lowering.h
index 42b4740dd0..00e8b73459 100644
--- a/deps/v8/src/compiler/js-create-lowering.h
+++ b/deps/v8/src/compiler/js-create-lowering.h
@@ -53,7 +53,9 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
Reduction ReduceJSCreateBoundFunction(Node* node);
Reduction ReduceJSCreateClosure(Node* node);
Reduction ReduceJSCreateIterResultObject(Node* node);
+ Reduction ReduceJSCreateStringIterator(Node* node);
Reduction ReduceJSCreateKeyValueArray(Node* node);
+ Reduction ReduceJSCreatePromise(Node* node);
Reduction ReduceJSCreateLiteralArrayOrObject(Node* node);
Reduction ReduceJSCreateEmptyLiteralObject(Node* node);
Reduction ReduceJSCreateEmptyLiteralArray(Node* node);
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index c09dcbc1b3..aa26b33997 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -85,6 +85,11 @@ REPLACE_STUB_CALL(ToName)
REPLACE_STUB_CALL(ToObject)
REPLACE_STUB_CALL(ToString)
REPLACE_STUB_CALL(ForInEnumerate)
+REPLACE_STUB_CALL(FulfillPromise)
+REPLACE_STUB_CALL(PerformPromiseThen)
+REPLACE_STUB_CALL(PromiseResolve)
+REPLACE_STUB_CALL(RejectPromise)
+REPLACE_STUB_CALL(ResolvePromise)
#undef REPLACE_STUB_CALL
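The REPLACE_STUB_CALL macro is defined earlier in js-generic-lowering.cc and is not part of this hunk; judging from the helpers below, each of the five new entries presumably expands to a trivial lowering of roughly the following shape (an assumption, not the verbatim definition):

    // Assumed expansion for, e.g., REPLACE_STUB_CALL(ResolvePromise):
    void JSGenericLowering::LowerJSResolvePromise(Node* node) {
      CallDescriptor::Flags flags = FrameStateFlagForCall(node);
      Callable callable =
          Builtins::CallableFor(isolate(), Builtins::kResolvePromise);
      ReplaceWithStubCall(node, callable, flags);
    }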
void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
@@ -97,12 +102,12 @@ void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
Operator::Properties properties,
int result_size) {
const CallInterfaceDescriptor& descriptor = callable.descriptor();
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), descriptor, descriptor.GetStackParameterCount(), flags,
properties, MachineType::AnyTagged(), result_size);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
node->InsertInput(zone(), 0, stub_code);
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
@@ -113,14 +118,14 @@ void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
Operator::Properties properties = node->op()->properties();
const Runtime::Function* fun = Runtime::FunctionForId(f);
int nargs = (nargs_override < 0) ? fun->nargs : nargs_override;
- CallDescriptor* desc =
+ auto call_descriptor =
Linkage::GetRuntimeCallDescriptor(zone(), f, nargs, properties, flags);
Node* ref = jsgraph()->ExternalConstant(ExternalReference(f, isolate()));
Node* arity = jsgraph()->Int32Constant(nargs);
node->InsertInput(zone(), 0, jsgraph()->CEntryStubConstant(fun->result_size));
node->InsertInput(zone(), nargs + 1, ref);
node->InsertInput(zone(), nargs + 2, arity);
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
void JSGenericLowering::LowerJSStrictEqual(Node* node) {
@@ -344,7 +349,7 @@ void JSGenericLowering::LowerJSCreateArray(Node* node) {
int const arity = static_cast<int>(p.arity());
Handle<AllocationSite> const site = p.site();
ArrayConstructorDescriptor descriptor(isolate());
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), descriptor, arity + 1,
CallDescriptor::kNeedsFrameState, node->op()->properties(),
MachineType::AnyTagged());
@@ -357,7 +362,7 @@ void JSGenericLowering::LowerJSCreateArray(Node* node) {
node->InsertInput(zone(), 3, stub_arity);
node->InsertInput(zone(), 4, type_info);
node->InsertInput(zone(), 5, receiver);
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
void JSGenericLowering::LowerJSCreateBoundFunction(Node* node) {
@@ -366,26 +371,19 @@ void JSGenericLowering::LowerJSCreateBoundFunction(Node* node) {
void JSGenericLowering::LowerJSCreateClosure(Node* node) {
CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Handle<SharedFunctionInfo> const shared_info = p.shared_info();
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(shared_info));
- node->RemoveInput(3); // control
+ node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.feedback_cell()));
+ node->RemoveInput(4); // control
// Use the FastNewClosure builtin only for functions allocated in new space.
if (p.pretenure() == NOT_TENURED) {
Callable callable =
Builtins::CallableFor(isolate(), Builtins::kFastNewClosure);
- node->InsertInput(zone(), 1,
- jsgraph()->HeapConstant(p.feedback().vector()));
- node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
ReplaceWithStubCall(node, callable, flags);
} else {
- node->InsertInput(zone(), 1,
- jsgraph()->HeapConstant(p.feedback().vector()));
- node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
- ReplaceWithRuntimeCall(node, (p.pretenure() == TENURED)
- ? Runtime::kNewClosure_Tenured
- : Runtime::kNewClosure);
+ ReplaceWithRuntimeCall(node, Runtime::kNewClosure_Tenured);
}
}
@@ -420,10 +418,18 @@ void JSGenericLowering::LowerJSCreateIterResultObject(Node* node) {
UNREACHABLE(); // Eliminated in typed lowering.
}
+void JSGenericLowering::LowerJSCreateStringIterator(Node* node) {
+ UNREACHABLE(); // Eliminated in typed lowering.
+}
+
void JSGenericLowering::LowerJSCreateKeyValueArray(Node* node) {
UNREACHABLE(); // Eliminated in typed lowering.
}
+void JSGenericLowering::LowerJSCreatePromise(Node* node) {
+ UNREACHABLE(); // Eliminated in typed lowering.
+}
+
void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
@@ -514,7 +520,7 @@ void JSGenericLowering::LowerJSConstructForwardVarargs(Node* node) {
int const arg_count = static_cast<int>(p.arity() - 2);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Callable callable = CodeFactory::ConstructForwardVarargs(isolate());
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* stub_arity = jsgraph()->Int32Constant(arg_count);
@@ -527,7 +533,7 @@ void JSGenericLowering::LowerJSConstructForwardVarargs(Node* node) {
node->InsertInput(zone(), 3, stub_arity);
node->InsertInput(zone(), 4, start_index);
node->InsertInput(zone(), 5, receiver);
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
void JSGenericLowering::LowerJSConstruct(Node* node) {
@@ -535,7 +541,7 @@ void JSGenericLowering::LowerJSConstruct(Node* node) {
int const arg_count = static_cast<int>(p.arity() - 2);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Callable callable = CodeFactory::Construct(isolate());
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* stub_arity = jsgraph()->Int32Constant(arg_count);
@@ -546,14 +552,14 @@ void JSGenericLowering::LowerJSConstruct(Node* node) {
node->InsertInput(zone(), 2, new_target);
node->InsertInput(zone(), 3, stub_arity);
node->InsertInput(zone(), 4, receiver);
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
void JSGenericLowering::LowerJSConstructWithArrayLike(Node* node) {
Callable callable =
Builtins::CallableFor(isolate(), Builtins::kConstructWithArrayLike);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* receiver = jsgraph()->UndefinedConstant();
@@ -563,7 +569,7 @@ void JSGenericLowering::LowerJSConstructWithArrayLike(Node* node) {
node->ReplaceInput(2, new_target);
node->ReplaceInput(3, arguments_list);
node->InsertInput(zone(), 4, receiver);
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
@@ -573,7 +579,7 @@ void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
int const new_target_index = arg_count + 1;
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Callable callable = CodeFactory::ConstructWithSpread(isolate());
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), arg_count, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* stack_arg_count = jsgraph()->Int32Constant(arg_count - 1);
@@ -589,7 +595,7 @@ void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
node->InsertInput(zone(), 3, stack_arg_count);
node->InsertInput(zone(), 4, spread);
node->InsertInput(zone(), 5, receiver);
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
void JSGenericLowering::LowerJSCallForwardVarargs(Node* node) {
@@ -597,7 +603,7 @@ void JSGenericLowering::LowerJSCallForwardVarargs(Node* node) {
int const arg_count = static_cast<int>(p.arity() - 2);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Callable callable = CodeFactory::CallForwardVarargs(isolate());
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* stub_arity = jsgraph()->Int32Constant(arg_count);
@@ -605,7 +611,7 @@ void JSGenericLowering::LowerJSCallForwardVarargs(Node* node) {
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 2, stub_arity);
node->InsertInput(zone(), 3, start_index);
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
void JSGenericLowering::LowerJSCall(Node* node) {
@@ -614,19 +620,19 @@ void JSGenericLowering::LowerJSCall(Node* node) {
ConvertReceiverMode const mode = p.convert_mode();
Callable callable = CodeFactory::Call(isolate(), mode);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* stub_arity = jsgraph()->Int32Constant(arg_count);
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 2, stub_arity);
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
void JSGenericLowering::LowerJSCallWithArrayLike(Node* node) {
Callable callable = CodeFactory::CallWithArrayLike(isolate());
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* receiver = node->InputAt(1);
@@ -634,7 +640,7 @@ void JSGenericLowering::LowerJSCallWithArrayLike(Node* node) {
node->InsertInput(zone(), 0, stub_code);
node->ReplaceInput(3, receiver);
node->ReplaceInput(2, arguments_list);
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
void JSGenericLowering::LowerJSCallWithSpread(Node* node) {
@@ -643,7 +649,7 @@ void JSGenericLowering::LowerJSCallWithSpread(Node* node) {
int const spread_index = static_cast<int>(p.arity() + 1);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Callable callable = CodeFactory::CallWithSpread(isolate());
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), arg_count, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
// We pass the spread in a register, not on the stack.
@@ -652,7 +658,7 @@ void JSGenericLowering::LowerJSCallWithSpread(Node* node) {
node->InsertInput(zone(), 2, stack_arg_count);
node->InsertInput(zone(), 3, node->InputAt(spread_index));
node->RemoveInput(spread_index + 1);
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
void JSGenericLowering::LowerJSCallRuntime(Node* node) {
@@ -693,6 +699,10 @@ void JSGenericLowering::LowerJSGeneratorRestoreContinuation(Node* node) {
UNREACHABLE(); // Eliminated in typed lowering.
}
+void JSGenericLowering::LowerJSGeneratorRestoreContext(Node* node) {
+ UNREACHABLE(); // Eliminated in typed lowering.
+}
+
void JSGenericLowering::LowerJSGeneratorRestoreInputOrDebugPos(Node* node) {
UNREACHABLE(); // Eliminated in typed lowering.
}
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index f5b4bdc181..9bbe2178fb 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -217,4 +217,4 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_COMPILER_JS_GRAPH_H_
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index add2b2c478..a995b038a8 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -323,8 +323,7 @@ bool JSInliner::DetermineCallTarget(
// target.
// TODO(turbofan): We might consider to eagerly create the feedback vector
// in such a case (in {DetermineCallContext} below) eventually.
- FeedbackSlot slot = p.feedback().slot();
- Handle<Cell> cell(Cell::cast(p.feedback().vector()->Get(slot)));
+ Handle<FeedbackCell> cell = p.feedback_cell();
if (!cell->value()->IsFeedbackVector()) return false;
shared_info_out = p.shared_info();
@@ -348,9 +347,9 @@ void JSInliner::DetermineCallContext(
if (match.HasValue() && match.Value()->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(match.Value());
- // If the target function was never invoked, its literals array might not
- // contain a feedback vector. We ensure at this point that it is created.
- JSFunction::EnsureLiterals(function);
+ // If the target function was never invoked, its feedback cell array might
+ // not contain a feedback vector. We ensure at this point that it's created.
+ JSFunction::EnsureFeedbackVector(function);
// The inlinee specializes to the context from the JSFunction object.
context_out = jsgraph()->Constant(handle(function->context()));
@@ -363,8 +362,7 @@ void JSInliner::DetermineCallContext(
// Load the feedback vector of the target by looking up its vector cell at
// the instantiation site (we only decide to inline if it's populated).
- FeedbackSlot slot = p.feedback().slot();
- Handle<Cell> cell(Cell::cast(p.feedback().vector()->Get(slot)));
+ Handle<FeedbackCell> cell = p.feedback_cell();
DCHECK(cell->value()->IsFeedbackVector());
// The inlinee uses the locally provided context at instantiation.
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index dc1ec521f2..c570a1f8dd 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -41,6 +41,14 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceCreateJSGeneratorObject(node);
case Runtime::kInlineGeneratorGetInputOrDebugPos:
return ReduceGeneratorGetInputOrDebugPos(node);
+ case Runtime::kInlineAsyncFunctionAwaitCaught:
+ return ReduceAsyncFunctionAwaitCaught(node);
+ case Runtime::kInlineAsyncFunctionAwaitUncaught:
+ return ReduceAsyncFunctionAwaitUncaught(node);
+ case Runtime::kInlineAsyncGeneratorAwaitCaught:
+ return ReduceAsyncGeneratorAwaitCaught(node);
+ case Runtime::kInlineAsyncGeneratorAwaitUncaught:
+ return ReduceAsyncGeneratorAwaitUncaught(node);
case Runtime::kInlineAsyncGeneratorReject:
return ReduceAsyncGeneratorReject(node);
case Runtime::kInlineAsyncGeneratorResolve:
@@ -49,8 +57,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceAsyncGeneratorYield(node);
case Runtime::kInlineGeneratorGetResumeMode:
return ReduceGeneratorGetResumeMode(node);
- case Runtime::kInlineGeneratorGetContext:
- return ReduceGeneratorGetContext(node);
case Runtime::kInlineIsArray:
return ReduceIsInstanceType(node, JS_ARRAY_TYPE);
case Runtime::kInlineIsTypedArray:
@@ -69,6 +75,10 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceIsJSReceiver(node);
case Runtime::kInlineIsSmi:
return ReduceIsSmi(node);
+ case Runtime::kInlineRejectPromise:
+ return ReduceRejectPromise(node);
+ case Runtime::kInlineResolvePromise:
+ return ReduceResolvePromise(node);
case Runtime::kInlineToInteger:
return ReduceToInteger(node);
case Runtime::kInlineToLength:
@@ -83,12 +93,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceCall(node);
case Runtime::kInlineGetSuperConstructor:
return ReduceGetSuperConstructor(node);
- case Runtime::kInlineArrayBufferViewGetByteLength:
- return ReduceArrayBufferViewField(
- node, AccessBuilder::ForJSArrayBufferViewByteLength());
- case Runtime::kInlineArrayBufferViewGetByteOffset:
- return ReduceArrayBufferViewField(
- node, AccessBuilder::ForJSArrayBufferViewByteOffset());
case Runtime::kInlineArrayBufferViewWasNeutered:
return ReduceArrayBufferViewWasNeutered(node);
case Runtime::kInlineMaxSmi:
@@ -98,8 +102,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
AccessBuilder::ForJSTypedArrayLength());
case Runtime::kInlineTheHole:
return ReduceTheHole(node);
- case Runtime::kInlineClassOf:
- return ReduceClassOf(node);
case Runtime::kInlineStringMaxLength:
return ReduceStringMaxLength(node);
default:
@@ -183,6 +185,33 @@ Reduction JSIntrinsicLowering::ReduceGeneratorGetInputOrDebugPos(Node* node) {
return Change(node, op, generator, effect, control);
}
+Reduction JSIntrinsicLowering::ReduceAsyncFunctionAwaitCaught(Node* node) {
+ return Change(
+ node,
+ Builtins::CallableFor(isolate(), Builtins::kAsyncFunctionAwaitCaught), 0);
+}
+
+Reduction JSIntrinsicLowering::ReduceAsyncFunctionAwaitUncaught(Node* node) {
+ return Change(
+ node,
+ Builtins::CallableFor(isolate(), Builtins::kAsyncFunctionAwaitUncaught),
+ 0);
+}
+
+Reduction JSIntrinsicLowering::ReduceAsyncGeneratorAwaitCaught(Node* node) {
+ return Change(
+ node,
+ Builtins::CallableFor(isolate(), Builtins::kAsyncGeneratorAwaitCaught),
+ 0);
+}
+
+Reduction JSIntrinsicLowering::ReduceAsyncGeneratorAwaitUncaught(Node* node) {
+ return Change(
+ node,
+ Builtins::CallableFor(isolate(), Builtins::kAsyncGeneratorAwaitUncaught),
+ 0);
+}
+
Reduction JSIntrinsicLowering::ReduceAsyncGeneratorReject(Node* node) {
return Change(
node, Builtins::CallableFor(isolate(), Builtins::kAsyncGeneratorReject),
@@ -201,16 +230,6 @@ Reduction JSIntrinsicLowering::ReduceAsyncGeneratorYield(Node* node) {
0);
}
-Reduction JSIntrinsicLowering::ReduceGeneratorGetContext(Node* node) {
- Node* const generator = NodeProperties::GetValueInput(node, 0);
- Node* const effect = NodeProperties::GetEffectInput(node);
- Node* const control = NodeProperties::GetControlInput(node);
- Operator const* const op =
- simplified()->LoadField(AccessBuilder::ForJSGeneratorObjectContext());
-
- return Change(node, op, generator, effect, control);
-}
-
Reduction JSIntrinsicLowering::ReduceGeneratorGetResumeMode(Node* node) {
Node* const generator = NodeProperties::GetValueInput(node, 0);
Node* const effect = NodeProperties::GetEffectInput(node);
@@ -272,6 +291,17 @@ Reduction JSIntrinsicLowering::ReduceIsSmi(Node* node) {
return Change(node, simplified()->ObjectIsSmi());
}
+Reduction JSIntrinsicLowering::ReduceRejectPromise(Node* node) {
+ RelaxControls(node);
+ NodeProperties::ChangeOp(node, javascript()->RejectPromise());
+ return Changed(node);
+}
+
+Reduction JSIntrinsicLowering::ReduceResolvePromise(Node* node) {
+ RelaxControls(node);
+ NodeProperties::ChangeOp(node, javascript()->ResolvePromise());
+ return Changed(node);
+}
Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op) {
// Replace all effect uses of {node} with the effect dependency.
@@ -384,16 +414,6 @@ Reduction JSIntrinsicLowering::ReduceTheHole(Node* node) {
return Replace(value);
}
-Reduction JSIntrinsicLowering::ReduceClassOf(Node* node) {
- RelaxEffectsAndControls(node);
- // The ClassOf operator has a single value input and control input.
- Node* control_input = NodeProperties::GetControlInput(node, 0);
- node->TrimInputCount(2);
- node->ReplaceInput(1, control_input);
- NodeProperties::ChangeOp(node, simplified()->ClassOf());
- return Changed(node);
-}
-
Reduction JSIntrinsicLowering::ReduceStringMaxLength(Node* node) {
Node* value = jsgraph()->Constant(String::kMaxLength);
ReplaceWithValue(node, value);
@@ -438,12 +458,12 @@ Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
Reduction JSIntrinsicLowering::Change(Node* node, Callable const& callable,
int stack_parameter_count) {
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), stack_parameter_count,
CallDescriptor::kNeedsFrameState, node->op()->properties());
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
return Changed(node);
}
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index 81cf5467d5..fb745986a6 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -44,8 +44,11 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
Reduction ReduceDeoptimizeNow(Node* node);
Reduction ReduceCreateJSGeneratorObject(Node* node);
Reduction ReduceGeneratorClose(Node* node);
- Reduction ReduceGeneratorGetContext(Node* node);
Reduction ReduceGeneratorGetInputOrDebugPos(Node* node);
+ Reduction ReduceAsyncFunctionAwaitCaught(Node* node);
+ Reduction ReduceAsyncFunctionAwaitUncaught(Node* node);
+ Reduction ReduceAsyncGeneratorAwaitCaught(Node* node);
+ Reduction ReduceAsyncGeneratorAwaitUncaught(Node* node);
Reduction ReduceAsyncGeneratorReject(Node* node);
Reduction ReduceAsyncGeneratorResolve(Node* node);
Reduction ReduceAsyncGeneratorYield(Node* node);
@@ -54,6 +57,8 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
Reduction ReduceIsInstanceType(Node* node, InstanceType instance_type);
Reduction ReduceIsJSReceiver(Node* node);
Reduction ReduceIsSmi(Node* node);
+ Reduction ReduceRejectPromise(Node* node);
+ Reduction ReduceResolvePromise(Node* node);
Reduction ReduceToInteger(Node* node);
Reduction ReduceToLength(Node* node);
Reduction ReduceToNumber(Node* node);
@@ -72,9 +77,6 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
// converted to proper CodeStubAssembler based builtins.
Reduction ReduceTheHole(Node* node);
- // TODO(turbofan): JavaScript builtins support; drop once all uses of
- // %_ClassOf in JavaScript builtins are eliminated.
- Reduction ReduceClassOf(Node* node);
Reduction ReduceStringMaxLength(Node* node);
Reduction Change(Node* node, const Operator* op);
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index b2f8c567e2..35e0a551db 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -76,6 +76,10 @@ Reduction JSNativeContextSpecialization::Reduce(Node* node) {
return ReduceJSHasInPrototypeChain(node);
case IrOpcode::kJSOrdinaryHasInstance:
return ReduceJSOrdinaryHasInstance(node);
+ case IrOpcode::kJSPromiseResolve:
+ return ReduceJSPromiseResolve(node);
+ case IrOpcode::kJSResolvePromise:
+ return ReduceJSResolvePromise(node);
case IrOpcode::kJSLoadContext:
return ReduceJSLoadContext(node);
case IrOpcode::kJSLoadGlobal:
@@ -168,7 +172,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
if (m.HasValue() && m.Value()->IsJSObject()) {
receiver = Handle<JSObject>::cast(m.Value());
} else if (p.feedback().IsValid()) {
- InstanceOfICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
if (!nexus.GetConstructorFeedback().ToHandle(&receiver)) return NoChange();
} else {
return NoChange();
@@ -411,6 +415,87 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
return NoChange();
}
+// ES section #sec-promise-resolve
+Reduction JSNativeContextSpecialization::ReduceJSPromiseResolve(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSPromiseResolve, node->opcode());
+ Node* constructor = NodeProperties::GetValueInput(node, 0);
+ Node* value = NodeProperties::GetValueInput(node, 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Check if the {constructor} is the %Promise% function.
+ HeapObjectMatcher m(constructor);
+ if (!m.Is(handle(native_context()->promise_function()))) return NoChange();
+
+ // Check if we know something about the {value}.
+ ZoneHandleSet<Map> value_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(value, effect, &value_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+ DCHECK_NE(0, value_maps.size());
+
+ // Check that the {value} cannot be a JSPromise.
+ for (Handle<Map> const value_map : value_maps) {
+ if (value_map->IsJSPromiseMap()) return NoChange();
+ }
+
+ // Create a %Promise% instance and resolve it with {value}.
+ Node* promise = effect =
+ graph()->NewNode(javascript()->CreatePromise(), context, effect);
+ effect = graph()->NewNode(javascript()->ResolvePromise(), promise, value,
+ context, frame_state, effect, control);
+ ReplaceWithValue(node, promise, effect, control);
+ return Replace(promise);
+}
+
+// ES section #sec-promise-resolve-functions
+Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSResolvePromise, node->opcode());
+ Node* promise = NodeProperties::GetValueInput(node, 0);
+ Node* resolution = NodeProperties::GetValueInput(node, 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Check if we know something about the {resolution}.
+ ZoneHandleSet<Map> resolution_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(resolution, effect, &resolution_maps);
+ if (result != NodeProperties::kReliableReceiverMaps) return NoChange();
+ DCHECK_NE(0, resolution_maps.size());
+
+ // Compute property access info for "then" on {resolution}.
+ PropertyAccessInfo access_info;
+ AccessInfoFactory access_info_factory(dependencies(), native_context(),
+ graph()->zone());
+ if (!access_info_factory.ComputePropertyAccessInfo(
+ MapHandles(resolution_maps.begin(), resolution_maps.end()),
+ factory()->then_string(), AccessMode::kLoad, &access_info)) {
+ return NoChange();
+ }
+
+ // We can further optimize the case where {resolution}
+ // definitely doesn't have a "then" property.
+ if (!access_info.IsNotFound()) return NoChange();
+ PropertyAccessBuilder access_builder(jsgraph(), dependencies());
+
+ // Add proper dependencies on the {resolution}s [[Prototype]]s.
+ Handle<JSObject> holder;
+ if (access_info.holder().ToHandle(&holder)) {
+ access_builder.AssumePrototypesStable(native_context(),
+ access_info.receiver_maps(), holder);
+ }
+
+ // Simply fulfill the {promise} with the {resolution}.
+ Node* value = effect =
+ graph()->NewNode(javascript()->FulfillPromise(), promise, resolution,
+ context, effect, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
Reduction JSNativeContextSpecialization::ReduceJSLoadContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
ContextAccess const& access = ContextAccessOf(node->op());
@@ -945,16 +1030,6 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus(
return ReduceGlobalAccess(node, nullptr, value, name, access_mode);
}
- // Check if the {nexus} reports type feedback for the IC.
- if (nexus.IsUninitialized()) {
- if (flags() & kBailoutOnUninitialized) {
- return ReduceSoftDeoptimize(
- node,
- DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
- }
- return NoChange();
- }
-
// Extract receiver maps from the IC using the {nexus}.
MapHandles receiver_maps;
if (!ExtractReceiverMaps(receiver, effect, nexus, &receiver_maps)) {
@@ -967,6 +1042,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus(
}
return NoChange();
}
+ DCHECK(!nexus.IsUninitialized());
// Try to lower the named access based on the {receiver_maps}.
return ReduceNamedAccess(node, value, receiver_maps, name, access_mode);
@@ -1007,9 +1083,9 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
}
}
- // Extract receiver maps from the load IC using the LoadICNexus.
+ // Extract receiver maps from the load IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- LoadICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
// Try to lower the named access based on the {receiver_maps}.
return ReduceNamedAccessFromNexus(node, value, nexus, p.name(),
@@ -1022,9 +1098,9 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) {
NamedAccess const& p = NamedAccessOf(node->op());
Node* const value = NodeProperties::GetValueInput(node, 1);
- // Extract receiver maps from the store IC using the StoreICNexus.
+ // Extract receiver maps from the store IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- StoreICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
// Try to lower the named access based on the {receiver_maps}.
return ReduceNamedAccessFromNexus(node, value, nexus, p.name(),
@@ -1036,9 +1112,9 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreNamedOwn(Node* node) {
StoreNamedOwnParameters const& p = StoreNamedOwnParametersOf(node->op());
Node* const value = NodeProperties::GetValueInput(node, 1);
- // Extract receiver maps from the IC using the StoreOwnICNexus.
+ // Extract receiver maps from the IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- StoreOwnICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
// Try to lower the creation of a named property based on the {receiver_maps}.
return ReduceNamedAccessFromNexus(node, value, nexus, p.name(),
@@ -1264,9 +1340,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
return Replace(value);
}
-template <typename KeyedICNexus>
Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
- Node* node, Node* index, Node* value, KeyedICNexus const& nexus,
+ Node* node, Node* index, Node* value, FeedbackNexus const& nexus,
AccessMode access_mode, KeyedAccessLoadMode load_mode,
KeyedAccessStoreMode store_mode) {
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
@@ -1354,16 +1429,6 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
}
}
- // Check if the {nexus} reports type feedback for the IC.
- if (nexus.IsUninitialized()) {
- if (flags() & kBailoutOnUninitialized) {
- return ReduceSoftDeoptimize(
- node,
- DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
- }
- return NoChange();
- }
-
// Extract receiver maps from the {nexus}.
MapHandles receiver_maps;
if (!ExtractReceiverMaps(receiver, effect, nexus, &receiver_maps)) {
@@ -1376,6 +1441,7 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
}
return NoChange();
}
+ DCHECK(!nexus.IsUninitialized());
// Optimize access for constant {index}.
HeapObjectMatcher mindex(index);
@@ -1543,9 +1609,9 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
}
}
- // Extract receiver maps from the keyed load IC using the KeyedLoadICNexus.
+ // Extract receiver maps from the keyed load IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- KeyedLoadICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
// Extract the keyed access load mode from the keyed load IC.
KeyedAccessLoadMode load_mode = nexus.GetKeyedAccessLoadMode();
@@ -1561,9 +1627,9 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreProperty(Node* node) {
Node* const index = NodeProperties::GetValueInput(node, 1);
Node* const value = NodeProperties::GetValueInput(node, 2);
- // Extract receiver maps from the keyed store IC using the KeyedStoreICNexus.
+ // Extract receiver maps from the keyed store IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- KeyedStoreICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
// Extract the keyed access store mode from the keyed store IC.
KeyedAccessStoreMode store_mode = nexus.GetKeyedAccessStoreMode();
@@ -1663,7 +1729,7 @@ Node* JSNativeContextSpecialization::InlineApiCall(
CallApiCallbackStub stub(isolate(), argc);
CallInterfaceDescriptor call_interface_descriptor =
stub.GetCallInterfaceDescriptor();
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), call_interface_descriptor,
call_interface_descriptor.GetStackParameterCount() + argc +
1 /* implicit receiver */,
@@ -1960,8 +2026,7 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
if (!p.feedback().IsValid()) return NoChange();
- StoreDataPropertyInLiteralICNexus nexus(p.feedback().vector(),
- p.feedback().slot());
+ FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
if (nexus.IsUninitialized()) {
return NoChange();
}
@@ -2124,12 +2189,18 @@ JSNativeContextSpecialization::BuildElementAccess(
if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS ||
store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
- // Check that the {index} is a valid array index, we do the actual
- // bounds check below and just skip the store below if it's out of
+ // Only check that the {index} is in Signed32 range. We do the actual
+ // bounds check below and just skip the property access if it's out of
// bounds for the {receiver}.
index = effect = graph()->NewNode(
- simplified()->CheckBounds(VectorSlotPair()), index,
- jsgraph()->Constant(Smi::kMaxValue), effect, control);
+ simplified()->SpeculativeToNumber(NumberOperationHint::kSigned32,
+ VectorSlotPair()),
+ index, effect, control);
+
+ // Cast the {index} to Unsigned32 range, so that the bounds checks
+ // below are performed on unsigned values, which means that all the
+ // Negative32 values are treated as out-of-bounds.
+ index = graph()->NewNode(simplified()->NumberToUint32(), index);
} else {
// Check that the {index} is in the valid range for the {receiver}.
index = effect =
@@ -2193,10 +2264,10 @@ JSNativeContextSpecialization::BuildElementAccess(
case AccessMode::kStore: {
// Ensure that the {value} is actually a Number or an Oddball,
// and truncate it to a Number appropriately.
- value = effect =
- graph()->NewNode(simplified()->SpeculativeToNumber(
- NumberOperationHint::kNumberOrOddball),
- value, effect, control);
+ value = effect = graph()->NewNode(
+ simplified()->SpeculativeToNumber(
+ NumberOperationHint::kNumberOrOddball, VectorSlotPair()),
+ value, effect, control);
// Introduce the appropriate truncation for {value}. Currently we
// only need to do this for ClampedUint8Array {receiver}s, as the
@@ -2246,10 +2317,11 @@ JSNativeContextSpecialization::BuildElementAccess(
simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
effect, control);
- // Don't try to store to a copy-on-write backing store.
+ // Don't try to store to a copy-on-write backing store (unless supported by
+ // the store mode).
if (access_mode == AccessMode::kStore &&
IsSmiOrObjectElementsKind(elements_kind) &&
- store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
+ !IsCOWHandlingStoreMode(store_mode)) {
effect = graph()->NewNode(
simplified()->CheckMaps(
CheckMapsFlag::kNone,
@@ -2459,6 +2531,15 @@ JSNativeContextSpecialization::BuildElementAccess(
simplified()->MaybeGrowFastElements(mode, VectorSlotPair()),
receiver, elements, index, elements_length, effect, control);
+ // If we didn't grow {elements}, it might still be COW, in which case we
+ // copy it now.
+ if (IsSmiOrObjectElementsKind(elements_kind) &&
+ store_mode == STORE_AND_GROW_NO_TRANSITION_HANDLE_COW) {
+ elements = effect =
+ graph()->NewNode(simplified()->EnsureWritableFastElements(),
+ receiver, elements, effect, control);
+ }
+
// Also update the "length" property if {receiver} is a JSArray.
if (receiver_is_jsarray) {
Node* check =
@@ -2524,13 +2605,16 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
graph()->NewNode(simplified()->MaskIndexWithBound(), index, length);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* vtrue = graph()->NewNode(simplified()->StringCharAt(), receiver,
- masked_index, if_true);
+ Node* etrue;
+ Node* vtrue = etrue = graph()->NewNode(
+ simplified()->StringCharAt(), receiver, masked_index, *effect, if_true);
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
Node* vfalse = jsgraph()->UndefinedConstant();
*control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ *effect =
+ graph()->NewNode(common()->EffectPhi(2), etrue, *effect, *control);
return graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
vtrue, vfalse, *control);
} else {
@@ -2543,8 +2627,10 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
graph()->NewNode(simplified()->MaskIndexWithBound(), index, length);
// Return the character from the {receiver} as single character string.
- return graph()->NewNode(simplified()->StringCharAt(), receiver,
- masked_index, *control);
+ Node* value = *effect =
+ graph()->NewNode(simplified()->StringCharAt(), receiver, masked_index,
+ *effect, *control);
+ return value;
}
}
@@ -2652,6 +2738,7 @@ bool JSNativeContextSpecialization::ExtractReceiverMaps(
Node* receiver, Node* effect, FeedbackNexus const& nexus,
MapHandles* receiver_maps) {
DCHECK_EQ(0, receiver_maps->size());
+ if (nexus.IsUninitialized()) return true;
// See if we can infer a concrete type for the {receiver}.
if (InferReceiverMaps(receiver, effect, receiver_maps)) {
// We can assume that the {receiver} still has the inferred {receiver_maps}.
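A change earlier in this file replaces the Smi::kMaxValue bounds check for out-of-bounds-tolerant element accesses with SpeculativeToNumber(Signed32) followed by NumberToUint32, so a single unsigned comparison rejects both negative and too-large indices. A minimal standalone sketch of that unsigned-compare trick (plain C++, not the compiler IR):

#include <cstdint>
#include <iostream>
#include <vector>

// Casting a negative int32 index to uint32 makes it a huge value, so one
// unsigned comparison covers both "index < 0" and "index >= length".
bool InBounds(int32_t index, size_t length) {
  return static_cast<uint32_t>(index) < length;
}

int main() {
  std::vector<int> elements{10, 20, 30};
  for (int32_t i : {-1, 0, 2, 3}) {
    std::cout << i << ": "
              << (InBounds(i, elements.size()) ? "in bounds" : "out of bounds")
              << '\n';
  }
}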
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 879203c1dd..6df48d6e23 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -62,6 +62,8 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
Reduction ReduceJSInstanceOf(Node* node);
Reduction ReduceJSHasInPrototypeChain(Node* node);
Reduction ReduceJSOrdinaryHasInstance(Node* node);
+ Reduction ReduceJSPromiseResolve(Node* node);
+ Reduction ReduceJSResolvePromise(Node* node);
Reduction ReduceJSLoadContext(Node* node);
Reduction ReduceJSLoadGlobal(Node* node);
Reduction ReduceJSStoreGlobal(Node* node);
@@ -77,9 +79,9 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
AccessMode access_mode,
KeyedAccessLoadMode load_mode,
KeyedAccessStoreMode store_mode);
- template <typename KeyedICNexus>
Reduction ReduceKeyedAccess(Node* node, Node* index, Node* value,
- KeyedICNexus const& nexus, AccessMode access_mode,
+ FeedbackNexus const& nexus,
+ AccessMode access_mode,
KeyedAccessLoadMode load_mode,
KeyedAccessStoreMode store_mode);
Reduction ReduceNamedAccessFromNexus(Node* node, Node* value,
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index 0ddf859cff..31be6d9979 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -448,7 +448,8 @@ const CreateBoundFunctionParameters& CreateBoundFunctionParametersOf(
bool operator==(CreateClosureParameters const& lhs,
CreateClosureParameters const& rhs) {
return lhs.pretenure() == rhs.pretenure() &&
- lhs.feedback() == rhs.feedback() &&
+ lhs.code().location() == rhs.code().location() &&
+ lhs.feedback_cell().location() == rhs.feedback_cell().location() &&
lhs.shared_info().location() == rhs.shared_info().location();
}
@@ -461,12 +462,13 @@ bool operator!=(CreateClosureParameters const& lhs,
size_t hash_value(CreateClosureParameters const& p) {
return base::hash_combine(p.pretenure(), p.shared_info().location(),
- p.feedback());
+ p.feedback_cell().location());
}
std::ostream& operator<<(std::ostream& os, CreateClosureParameters const& p) {
- return os << p.pretenure() << ", " << Brief(*p.shared_info());
+ return os << p.pretenure() << ", " << Brief(*p.shared_info()) << ", "
+ << Brief(*p.feedback_cell()) << ", " << Brief(*p.code());
}
@@ -543,42 +545,50 @@ CompareOperationHint CompareOperationHintOf(const Operator* op) {
return OpParameter<CompareOperationHint>(op);
}
-#define CACHED_OP_LIST(V) \
- V(BitwiseOr, Operator::kNoProperties, 2, 1) \
- V(BitwiseXor, Operator::kNoProperties, 2, 1) \
- V(BitwiseAnd, Operator::kNoProperties, 2, 1) \
- V(ShiftLeft, Operator::kNoProperties, 2, 1) \
- V(ShiftRight, Operator::kNoProperties, 2, 1) \
- V(ShiftRightLogical, Operator::kNoProperties, 2, 1) \
- V(Subtract, Operator::kNoProperties, 2, 1) \
- V(Multiply, Operator::kNoProperties, 2, 1) \
- V(Divide, Operator::kNoProperties, 2, 1) \
- V(Modulus, Operator::kNoProperties, 2, 1) \
- V(Exponentiate, Operator::kNoProperties, 2, 1) \
- V(BitwiseNot, Operator::kNoProperties, 1, 1) \
- V(Decrement, Operator::kNoProperties, 1, 1) \
- V(Increment, Operator::kNoProperties, 1, 1) \
- V(Negate, Operator::kNoProperties, 1, 1) \
- V(ToInteger, Operator::kNoProperties, 1, 1) \
- V(ToLength, Operator::kNoProperties, 1, 1) \
- V(ToName, Operator::kNoProperties, 1, 1) \
- V(ToNumber, Operator::kNoProperties, 1, 1) \
- V(ToNumeric, Operator::kNoProperties, 1, 1) \
- V(ToObject, Operator::kFoldable, 1, 1) \
- V(ToString, Operator::kNoProperties, 1, 1) \
- V(Create, Operator::kNoProperties, 2, 1) \
- V(CreateIterResultObject, Operator::kEliminatable, 2, 1) \
- V(CreateKeyValueArray, Operator::kEliminatable, 2, 1) \
- V(HasProperty, Operator::kNoProperties, 2, 1) \
- V(HasInPrototypeChain, Operator::kNoProperties, 2, 1) \
- V(OrdinaryHasInstance, Operator::kNoProperties, 2, 1) \
- V(ForInEnumerate, Operator::kNoProperties, 1, 1) \
- V(LoadMessage, Operator::kNoThrow | Operator::kNoWrite, 0, 1) \
- V(StoreMessage, Operator::kNoRead | Operator::kNoThrow, 1, 0) \
- V(GeneratorRestoreContinuation, Operator::kNoThrow, 1, 1) \
- V(GeneratorRestoreInputOrDebugPos, Operator::kNoThrow, 1, 1) \
- V(StackCheck, Operator::kNoWrite, 0, 0) \
- V(Debugger, Operator::kNoProperties, 0, 0) \
+#define CACHED_OP_LIST(V) \
+ V(BitwiseOr, Operator::kNoProperties, 2, 1) \
+ V(BitwiseXor, Operator::kNoProperties, 2, 1) \
+ V(BitwiseAnd, Operator::kNoProperties, 2, 1) \
+ V(ShiftLeft, Operator::kNoProperties, 2, 1) \
+ V(ShiftRight, Operator::kNoProperties, 2, 1) \
+ V(ShiftRightLogical, Operator::kNoProperties, 2, 1) \
+ V(Subtract, Operator::kNoProperties, 2, 1) \
+ V(Multiply, Operator::kNoProperties, 2, 1) \
+ V(Divide, Operator::kNoProperties, 2, 1) \
+ V(Modulus, Operator::kNoProperties, 2, 1) \
+ V(Exponentiate, Operator::kNoProperties, 2, 1) \
+ V(BitwiseNot, Operator::kNoProperties, 1, 1) \
+ V(Decrement, Operator::kNoProperties, 1, 1) \
+ V(Increment, Operator::kNoProperties, 1, 1) \
+ V(Negate, Operator::kNoProperties, 1, 1) \
+ V(ToInteger, Operator::kNoProperties, 1, 1) \
+ V(ToLength, Operator::kNoProperties, 1, 1) \
+ V(ToName, Operator::kNoProperties, 1, 1) \
+ V(ToNumber, Operator::kNoProperties, 1, 1) \
+ V(ToNumeric, Operator::kNoProperties, 1, 1) \
+ V(ToObject, Operator::kFoldable, 1, 1) \
+ V(ToString, Operator::kNoProperties, 1, 1) \
+ V(Create, Operator::kNoProperties, 2, 1) \
+ V(CreateIterResultObject, Operator::kEliminatable, 2, 1) \
+ V(CreateStringIterator, Operator::kEliminatable, 1, 1) \
+ V(CreateKeyValueArray, Operator::kEliminatable, 2, 1) \
+ V(CreatePromise, Operator::kEliminatable, 0, 1) \
+ V(HasProperty, Operator::kNoProperties, 2, 1) \
+ V(HasInPrototypeChain, Operator::kNoProperties, 2, 1) \
+ V(OrdinaryHasInstance, Operator::kNoProperties, 2, 1) \
+ V(ForInEnumerate, Operator::kNoProperties, 1, 1) \
+ V(LoadMessage, Operator::kNoThrow | Operator::kNoWrite, 0, 1) \
+ V(StoreMessage, Operator::kNoRead | Operator::kNoThrow, 1, 0) \
+ V(GeneratorRestoreContinuation, Operator::kNoThrow, 1, 1) \
+ V(GeneratorRestoreContext, Operator::kNoThrow, 1, 1) \
+ V(GeneratorRestoreInputOrDebugPos, Operator::kNoThrow, 1, 1) \
+ V(StackCheck, Operator::kNoWrite, 0, 0) \
+ V(Debugger, Operator::kNoProperties, 0, 0) \
+ V(FulfillPromise, Operator::kNoDeopt | Operator::kNoThrow, 2, 1) \
+ V(PerformPromiseThen, Operator::kNoDeopt | Operator::kNoThrow, 4, 1) \
+ V(PromiseResolve, Operator::kNoProperties, 2, 1) \
+ V(RejectPromise, Operator::kNoDeopt | Operator::kNoThrow, 3, 1) \
+ V(ResolvePromise, Operator::kNoDeopt | Operator::kNoThrow, 2, 1) \
V(GetSuperConstructor, Operator::kNoWrite, 1, 1)
#define BINARY_OP_LIST(V) V(Add)
@@ -749,6 +759,8 @@ const Operator* JSOperatorBuilder::Call(size_t arity, CallFrequency frequency,
VectorSlotPair const& feedback,
ConvertReceiverMode convert_mode,
SpeculationMode speculation_mode) {
+ DCHECK_IMPLIES(speculation_mode == SpeculationMode::kAllowSpeculation,
+ feedback.IsValid());
CallParameters parameters(arity, frequency, feedback, convert_mode,
speculation_mode);
return new (zone()) Operator1<CallParameters>( // --
@@ -769,6 +781,8 @@ const Operator* JSOperatorBuilder::CallWithArrayLike(CallFrequency frequency) {
const Operator* JSOperatorBuilder::CallWithSpread(
uint32_t arity, CallFrequency frequency, VectorSlotPair const& feedback,
SpeculationMode speculation_mode) {
+ DCHECK_IMPLIES(speculation_mode == SpeculationMode::kAllowSpeculation,
+ feedback.IsValid());
CallParameters parameters(arity, frequency, feedback,
ConvertReceiverMode::kAny, speculation_mode);
return new (zone()) Operator1<CallParameters>( // --
@@ -1048,9 +1062,10 @@ const Operator* JSOperatorBuilder::CreateBoundFunction(size_t arity,
}
const Operator* JSOperatorBuilder::CreateClosure(
- Handle<SharedFunctionInfo> shared_info, VectorSlotPair const& feedback,
- PretenureFlag pretenure) {
- CreateClosureParameters parameters(shared_info, feedback, pretenure);
+ Handle<SharedFunctionInfo> shared_info, Handle<FeedbackCell> feedback_cell,
+ Handle<Code> code, PretenureFlag pretenure) {
+ CreateClosureParameters parameters(shared_info, feedback_cell, code,
+ pretenure);
return new (zone()) Operator1<CreateClosureParameters>( // --
IrOpcode::kJSCreateClosure, Operator::kEliminatable, // opcode
"JSCreateClosure", // name
@@ -1155,6 +1170,10 @@ const Operator* JSOperatorBuilder::CreateBlockContext(
scope_info); // parameter
}
+#undef BINARY_OP_LIST
+#undef CACHED_OP_LIST
+#undef COMPARE_OP_LIST
+
} // namespace compiler
} // namespace internal
} // namespace v8
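CreateClosureParameters is now compared and hashed through the locations of its handles (shared info, feedback cell, code) rather than through a VectorSlotPair, i.e. two parameter sets are equal when they point at the same heap objects. A hedged standalone sketch of identity-based equality and hashing, using std::hash over pointers and hypothetical stand-in types rather than the V8 handle machinery:

#include <cstddef>
#include <functional>
#include <iostream>

struct FeedbackCell {};  // stand-ins for the real heap objects
struct Code {};

struct ClosureParams {
  const FeedbackCell* feedback_cell;
  const Code* code;
  bool pretenure;
};

// Equal when they refer to the same objects (pointer identity), mirroring
// the handle-location comparison in the patch.
bool operator==(const ClosureParams& a, const ClosureParams& b) {
  return a.feedback_cell == b.feedback_cell && a.code == b.code &&
         a.pretenure == b.pretenure;
}

size_t hash_value(const ClosureParams& p) {
  size_t hash = std::hash<const void*>{}(p.feedback_cell);
  hash = hash * 31 + std::hash<const void*>{}(p.code);
  return hash * 31 + std::hash<bool>{}(p.pretenure);
}

int main() {
  FeedbackCell cell;
  Code code;
  ClosureParams a{&cell, &code, false};
  ClosureParams b{&cell, &code, false};
  std::cout << (a == b) << ' ' << (hash_value(a) == hash_value(b)) << '\n';  // 1 1
}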
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 3875234d5a..959a83026c 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -18,6 +18,7 @@ namespace internal {
class AllocationSite;
class BoilerplateDescription;
class ConstantElementsPair;
+class FeedbackCell;
class SharedFunctionInfo;
namespace compiler {
@@ -533,18 +534,23 @@ const CreateBoundFunctionParameters& CreateBoundFunctionParametersOf(
class CreateClosureParameters final {
public:
CreateClosureParameters(Handle<SharedFunctionInfo> shared_info,
- VectorSlotPair const& feedback,
+ Handle<FeedbackCell> feedback_cell, Handle<Code> code,
PretenureFlag pretenure)
- : shared_info_(shared_info), feedback_(feedback), pretenure_(pretenure) {}
+ : shared_info_(shared_info),
+ feedback_cell_(feedback_cell),
+ code_(code),
+ pretenure_(pretenure) {}
Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
- VectorSlotPair const& feedback() const { return feedback_; }
+ Handle<FeedbackCell> feedback_cell() const { return feedback_cell_; }
+ Handle<Code> code() const { return code_; }
PretenureFlag pretenure() const { return pretenure_; }
private:
- const Handle<SharedFunctionInfo> shared_info_;
- VectorSlotPair const feedback_;
- const PretenureFlag pretenure_;
+ Handle<SharedFunctionInfo> const shared_info_;
+ Handle<FeedbackCell> const feedback_cell_;
+ Handle<Code> const code_;
+ PretenureFlag const pretenure_;
};
bool operator==(CreateClosureParameters const&, CreateClosureParameters const&);
@@ -652,10 +658,13 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* CreateArray(size_t arity, Handle<AllocationSite> site);
const Operator* CreateBoundFunction(size_t arity, Handle<Map> map);
const Operator* CreateClosure(Handle<SharedFunctionInfo> shared_info,
- VectorSlotPair const& feedback,
- PretenureFlag pretenure);
+ Handle<FeedbackCell> feedback_cell,
+ Handle<Code> code,
+ PretenureFlag pretenure = NOT_TENURED);
const Operator* CreateIterResultObject();
+ const Operator* CreateStringIterator();
const Operator* CreateKeyValueArray();
+ const Operator* CreatePromise();
const Operator* CreateLiteralArray(Handle<ConstantElementsPair> constant,
VectorSlotPair const& feedback,
int literal_flags, int number_of_elements);
@@ -675,12 +684,12 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
size_t arity, CallFrequency frequency = CallFrequency(),
VectorSlotPair const& feedback = VectorSlotPair(),
ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny,
- SpeculationMode speculation_mode = SpeculationMode::kAllowSpeculation);
+ SpeculationMode speculation_mode = SpeculationMode::kDisallowSpeculation);
const Operator* CallWithArrayLike(CallFrequency frequency);
const Operator* CallWithSpread(
uint32_t arity, CallFrequency frequency = CallFrequency(),
VectorSlotPair const& feedback = VectorSlotPair(),
- SpeculationMode speculation_mode = SpeculationMode::kAllowSpeculation);
+ SpeculationMode speculation_mode = SpeculationMode::kDisallowSpeculation);
const Operator* CallRuntime(Runtime::FunctionId id);
const Operator* CallRuntime(Runtime::FunctionId id, size_t arity);
const Operator* CallRuntime(const Runtime::Function* function, size_t arity);
@@ -727,7 +736,6 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* LoadModule(int32_t cell_index);
const Operator* StoreModule(int32_t cell_index);
- const Operator* ClassOf();
const Operator* HasInPrototypeChain();
const Operator* InstanceOf(const VectorSlotPair& feedback);
const Operator* OrdinaryHasInstance();
@@ -742,8 +750,10 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
// Used to implement Ignition's SuspendGenerator bytecode.
const Operator* GeneratorStore(int register_count);
- // Used to implement Ignition's RestoreGeneratorState bytecode.
+ // Used to implement Ignition's SwitchOnGeneratorState bytecode.
const Operator* GeneratorRestoreContinuation();
+ const Operator* GeneratorRestoreContext();
+
// Used to implement Ignition's ResumeGenerator bytecode.
const Operator* GeneratorRestoreRegister(int index);
const Operator* GeneratorRestoreInputOrDebugPos();
@@ -751,6 +761,12 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* StackCheck();
const Operator* Debugger();
+ const Operator* FulfillPromise();
+ const Operator* PerformPromiseThen();
+ const Operator* PromiseResolve();
+ const Operator* RejectPromise();
+ const Operator* ResolvePromise();
+
const Operator* CreateFunctionContext(int slot_count, ScopeType scope_type);
const Operator* CreateCatchContext(const Handle<String>& name,
const Handle<ScopeInfo>& scope_info);
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
index 0ec63600a2..fac87bc685 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.cc
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -60,14 +60,12 @@ class JSSpeculativeBinopBuilder final {
slot_(slot) {}
BinaryOperationHint GetBinaryOperationHint() {
- DCHECK_EQ(FeedbackSlotKind::kBinaryOp, feedback_vector()->GetKind(slot_));
- BinaryOpICNexus nexus(feedback_vector(), slot_);
+ FeedbackNexus nexus(feedback_vector(), slot_);
return nexus.GetBinaryOperationFeedback();
}
CompareOperationHint GetCompareOperationHint() {
- DCHECK_EQ(FeedbackSlotKind::kCompareOp, feedback_vector()->GetKind(slot_));
- CompareICNexus nexus(feedback_vector(), slot_);
+ FeedbackNexus nexus(feedback_vector(), slot_);
return nexus.GetCompareOperationFeedback();
}
@@ -218,7 +216,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceUnaryOperation(
const Operator* op, Node* operand, Node* effect, Node* control,
FeedbackSlot slot) const {
DCHECK(!slot.IsInvalid());
- BinaryOpICNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector(), slot);
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForUnaryOperation)) {
@@ -282,7 +280,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
switch (op->opcode()) {
case IrOpcode::kJSStrictEqual: {
DCHECK(!slot.IsInvalid());
- CompareICNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector(), slot);
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForCompareOperation)) {
@@ -298,7 +296,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
case IrOpcode::kJSLessThanOrEqual:
case IrOpcode::kJSGreaterThanOrEqual: {
DCHECK(!slot.IsInvalid());
- CompareICNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector(), slot);
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForCompareOperation)) {
@@ -312,7 +310,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
}
case IrOpcode::kJSInstanceOf: {
DCHECK(!slot.IsInvalid());
- InstanceOfICNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector(), slot);
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForCompareOperation)) {
@@ -334,7 +332,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
case IrOpcode::kJSDivide:
case IrOpcode::kJSModulus: {
DCHECK(!slot.IsInvalid());
- BinaryOpICNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector(), slot);
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForBinaryOperation)) {
@@ -361,7 +359,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceForInNextOperation(
Node* receiver, Node* cache_array, Node* cache_type, Node* index,
Node* effect, Node* control, FeedbackSlot slot) const {
DCHECK(!slot.IsInvalid());
- ForInICNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector(), slot);
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForForIn)) {
@@ -375,7 +373,7 @@ JSTypeHintLowering::ReduceForInPrepareOperation(Node* enumerator, Node* effect,
Node* control,
FeedbackSlot slot) const {
DCHECK(!slot.IsInvalid());
- ForInICNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector(), slot);
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForForIn)) {
@@ -387,13 +385,13 @@ JSTypeHintLowering::ReduceForInPrepareOperation(Node* enumerator, Node* effect,
JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceToNumberOperation(
Node* input, Node* effect, Node* control, FeedbackSlot slot) const {
DCHECK(!slot.IsInvalid());
- BinaryOpICNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector(), slot);
NumberOperationHint hint;
if (BinaryOperationHintToNumberOperationHint(
nexus.GetBinaryOperationFeedback(), &hint)) {
Node* node = jsgraph()->graph()->NewNode(
- jsgraph()->simplified()->SpeculativeToNumber(hint), input, effect,
- control);
+ jsgraph()->simplified()->SpeculativeToNumber(hint, VectorSlotPair()),
+ input, effect, control);
return LoweringResult::SideEffectFree(node, node, control);
}
return LoweringResult::NoChange();
@@ -405,7 +403,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceCallOperation(
DCHECK(op->opcode() == IrOpcode::kJSCall ||
op->opcode() == IrOpcode::kJSCallWithSpread);
DCHECK(!slot.IsInvalid());
- CallICNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector(), slot);
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForCall)) {
@@ -420,7 +418,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceConstructOperation(
DCHECK(op->opcode() == IrOpcode::kJSConstruct ||
op->opcode() == IrOpcode::kJSConstructWithSpread);
DCHECK(!slot.IsInvalid());
- CallICNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector(), slot);
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForConstruct)) {
@@ -434,7 +432,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceLoadNamedOperation(
FeedbackSlot slot) const {
DCHECK_EQ(IrOpcode::kJSLoadNamed, op->opcode());
DCHECK(!slot.IsInvalid());
- LoadICNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector(), slot);
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess)) {
@@ -448,7 +446,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceLoadKeyedOperation(
FeedbackSlot slot) const {
DCHECK_EQ(IrOpcode::kJSLoadProperty, op->opcode());
DCHECK(!slot.IsInvalid());
- KeyedLoadICNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector(), slot);
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess)) {
@@ -465,7 +463,7 @@ JSTypeHintLowering::ReduceStoreNamedOperation(const Operator* op, Node* obj,
DCHECK(op->opcode() == IrOpcode::kJSStoreNamed ||
op->opcode() == IrOpcode::kJSStoreNamedOwn);
DCHECK(!slot.IsInvalid());
- StoreICNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector(), slot);
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess)) {
@@ -481,7 +479,7 @@ JSTypeHintLowering::ReduceStoreKeyedOperation(const Operator* op, Node* obj,
FeedbackSlot slot) const {
DCHECK_EQ(IrOpcode::kJSStoreProperty, op->opcode());
DCHECK(!slot.IsInvalid());
- KeyedStoreICNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector(), slot);
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess)) {
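Throughout js-type-hint-lowering.cc the kind-specific nexus classes (CallICNexus, LoadICNexus, KeyedStoreICNexus, and so on) are replaced by the single FeedbackNexus constructed from a vector and a slot. A hypothetical sketch of that unification, with per-kind checks moved into the accessors instead of into separate wrapper classes (stand-in types, not the real feedback vector machinery):

#include <cassert>
#include <cstddef>
#include <vector>

enum class SlotKind { kCall, kLoad, kBinaryOp };

struct FeedbackVector {
  std::vector<SlotKind> kinds;
  SlotKind GetKind(size_t slot) const { return kinds.at(slot); }
};

// One accessor class keyed by (vector, slot); kind-specific queries assert
// the slot kind instead of requiring a dedicated subclass per IC kind.
class FeedbackNexus {
 public:
  FeedbackNexus(const FeedbackVector* vector, size_t slot)
      : vector_(vector), slot_(slot) {}

  bool IsCallSlot() const { return vector_->GetKind(slot_) == SlotKind::kCall; }

  int GetCallCount() const {
    assert(IsCallSlot());
    return 0;  // placeholder: real feedback lookup would go here
  }

 private:
  const FeedbackVector* vector_;
  size_t slot_;
};

int main() {
  FeedbackVector vector{{SlotKind::kCall, SlotKind::kLoad}};
  FeedbackNexus nexus(&vector, 0);
  return nexus.IsCallSlot() ? 0 : 1;
}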
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index c265caf9f0..b3cd43ff71 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -504,6 +504,13 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
r.ConvertInputsToNumber();
return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
}
+ if (BinaryOperationHintOf(node->op()) == BinaryOperationHint::kString) {
+ // Always bake in String feedback into the graph.
+ // TODO(bmeurer): Consider adding a SpeculativeStringAdd operator,
+ // and use that in JSTypeHintLowering instead of looking at the
+ // binary operation feedback here.
+ r.CheckInputsToString();
+ }
if (r.OneInputIs(Type::String())) {
// We know that (at least) one input is already a String,
// so try to strength-reduce the non-String input.
@@ -539,7 +546,7 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
return ReduceCreateConsString(node);
}
// Eliminate useless concatenation of empty string.
- if (BinaryOperationHintOf(node->op()) == BinaryOperationHint::kString) {
+ if (r.BothInputsAre(Type::String())) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
if (r.LeftInputIs(empty_string_type_)) {
@@ -573,15 +580,17 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
// JSAdd(x, y:string) => CallStub[StringAdd](x, y)
Callable const callable =
CodeFactory::StringAdd(isolate(), flags, NOT_TENURED);
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNeedsFrameState, properties);
DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
return Changed(node);
}
+ // We never get here when we had String feedback.
+ DCHECK_NE(BinaryOperationHint::kString, BinaryOperationHintOf(node->op()));
return NoChange();
}
@@ -1092,12 +1101,13 @@ Reduction JSTypedLowering::ReduceJSToObject(Node* node) {
{
// Convert {receiver} using the ToObjectStub.
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNeedsFrameState, node->op()->properties());
- rfalse = efalse = if_false = graph()->NewNode(
- common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
- receiver, context, frame_state, efalse, if_false);
+ rfalse = efalse = if_false =
+ graph()->NewNode(common()->Call(call_descriptor),
+ jsgraph()->HeapConstant(callable.code()), receiver,
+ context, frame_state, efalse, if_false);
}
// Update potential {IfException} uses of {node} to point to the above
@@ -1491,10 +1501,10 @@ void ReduceBuiltin(Isolate* isolate, JSGraph* jsgraph, Node* node,
static const int kReturnCount = 1;
const char* debug_name = Builtins::name(builtin_index);
Operator::Properties properties = node->op()->properties();
- CallDescriptor* desc = Linkage::GetCEntryStubCallDescriptor(
+ auto call_descriptor = Linkage::GetCEntryStubCallDescriptor(
zone, kReturnCount, argc, debug_name, properties, flags);
- NodeProperties::ChangeOp(node, jsgraph->common()->Call(desc));
+ NodeProperties::ChangeOp(node, jsgraph->common()->Call(call_descriptor));
}
bool NeedsArgumentAdaptorFrame(Handle<SharedFunctionInfo> shared, int arity) {
@@ -1666,6 +1676,12 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
Handle<JSFunction> function =
Handle<JSFunction>::cast(target_type->AsHeapConstant()->Value());
Handle<SharedFunctionInfo> shared(function->shared(), isolate());
+
+ if (function->shared()->HasBreakInfo()) {
+ // Do not inline the call if we need to check whether to break at entry.
+ return NoChange();
+ }
+
const int builtin_index = shared->code()->builtin_index();
const bool is_builtin = (builtin_index != -1);
@@ -1697,6 +1713,7 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
Node* new_target = jsgraph()->UndefinedConstant();
Node* argument_count = jsgraph()->Constant(arity);
+
if (NeedsArgumentAdaptorFrame(shared, arity)) {
// Patch {node} to an indirect call via the ArgumentsAdaptorTrampoline.
Callable callable = CodeFactory::ArgumentAdaptor(isolate());
@@ -1745,7 +1762,7 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
if (p.convert_mode() != convert_mode) {
NodeProperties::ChangeOp(
node, javascript()->Call(p.arity(), p.frequency(), p.feedback(),
- convert_mode));
+ convert_mode, p.speculation_mode()));
return Changed(node);
}
@@ -1825,12 +1842,13 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
// {receiver} (does the ToName conversion implicitly).
Callable const callable =
Builtins::CallableFor(isolate(), Builtins::kForInFilter);
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNeedsFrameState);
- vfalse = efalse = if_false = graph()->NewNode(
- common()->Call(desc), jsgraph()->HeapConstant(callable.code()), key,
- receiver, context, frame_state, effect, if_false);
+ vfalse = efalse = if_false =
+ graph()->NewNode(common()->Call(call_descriptor),
+ jsgraph()->HeapConstant(callable.code()), key,
+ receiver, context, frame_state, effect, if_false);
// Update potential {IfException} uses of {node} to point to the above
// ForInFilter stub call node instead.
@@ -2034,9 +2052,11 @@ Reduction JSTypedLowering::ReduceJSGeneratorStore(Node* node) {
for (int i = 0; i < register_count; ++i) {
Node* value = NodeProperties::GetValueInput(node, 3 + i);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForFixedArraySlot(i)), array,
- value, effect, control);
+ if (value != jsgraph()->OptimizedOutConstant()) {
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForFixedArraySlot(i)), array,
+ value, effect, control);
+ }
}
effect = graph()->NewNode(simplified()->StoreField(context_field), generator,
@@ -2069,6 +2089,21 @@ Reduction JSTypedLowering::ReduceJSGeneratorRestoreContinuation(Node* node) {
return Changed(continuation);
}
+Reduction JSTypedLowering::ReduceJSGeneratorRestoreContext(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSGeneratorRestoreContext, node->opcode());
+
+ const Operator* new_op =
+ simplified()->LoadField(AccessBuilder::ForJSGeneratorObjectContext());
+
+ // Mutate the node in-place.
+ DCHECK(OperatorProperties::HasContextInput(node->op()));
+ DCHECK(!OperatorProperties::HasContextInput(new_op));
+ node->RemoveInput(NodeProperties::FirstContextIndex(node));
+
+ NodeProperties::ChangeOp(node, new_op);
+ return Changed(node);
+}
+
Reduction JSTypedLowering::ReduceJSGeneratorRestoreRegister(Node* node) {
DCHECK_EQ(IrOpcode::kJSGeneratorRestoreRegister, node->opcode());
Node* generator = NodeProperties::GetValueInput(node, 0);
@@ -2190,6 +2225,8 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSGeneratorStore(node);
case IrOpcode::kJSGeneratorRestoreContinuation:
return ReduceJSGeneratorRestoreContinuation(node);
+ case IrOpcode::kJSGeneratorRestoreContext:
+ return ReduceJSGeneratorRestoreContext(node);
case IrOpcode::kJSGeneratorRestoreRegister:
return ReduceJSGeneratorRestoreRegister(node);
case IrOpcode::kJSGeneratorRestoreInputOrDebugPos:
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index d72303f495..72ce4fb8dd 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -25,6 +25,8 @@ class JSOperatorBuilder;
class SimplifiedOperatorBuilder;
class TypeCache;
+enum Signedness { kSigned, kUnsigned };
+
// Lowers JS-level operators to simplified operators based on types.
class V8_EXPORT_PRIVATE JSTypedLowering final
: public NON_EXPORTED_BASE(AdvancedReducer) {
@@ -72,6 +74,7 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Reduction ReduceJSStoreMessage(Node* node);
Reduction ReduceJSGeneratorStore(Node* node);
Reduction ReduceJSGeneratorRestoreContinuation(Node* node);
+ Reduction ReduceJSGeneratorRestoreContext(Node* node);
Reduction ReduceJSGeneratorRestoreRegister(Node* node);
Reduction ReduceJSGeneratorRestoreInputOrDebugPos(Node* node);
Reduction ReduceNumberBinop(Node* node);
diff --git a/deps/v8/src/compiler/jump-threading.cc b/deps/v8/src/compiler/jump-threading.cc
index c2a84cc9b5..933ccc0a9c 100644
--- a/deps/v8/src/compiler/jump-threading.cc
+++ b/deps/v8/src/compiler/jump-threading.cc
@@ -14,6 +14,8 @@ namespace compiler {
if (FLAG_trace_turbo_jt) PrintF(__VA_ARGS__); \
} while (false)
+namespace {
+
struct JumpThreadingState {
bool forwarded;
ZoneVector<RpoNumber>& result;
@@ -53,6 +55,19 @@ struct JumpThreadingState {
RpoNumber onstack() { return RpoNumber::FromInt(-2); }
};
+bool IsBlockWithBranchPoisoning(InstructionSequence* code,
+ InstructionBlock* block) {
+ if (block->PredecessorCount() != 1) return false;
+ RpoNumber pred_rpo = (block->predecessors())[0];
+ const InstructionBlock* pred = code->InstructionBlockAt(pred_rpo);
+ if (pred->code_start() == pred->code_end()) return false;
+ Instruction* instr = code->InstructionAt(pred->code_end() - 1);
+ FlagsMode mode = FlagsModeField::decode(instr->opcode());
+ return mode == kFlags_branch_and_poison;
+}
+
+} // namespace
+
bool JumpThreading::ComputeForwarding(Zone* local_zone,
ZoneVector<RpoNumber>& result,
InstructionSequence* code,
@@ -72,46 +87,48 @@ bool JumpThreading::ComputeForwarding(Zone* local_zone,
// Process the instructions in a block up to a non-empty instruction.
TRACE("jt [%d] B%d\n", static_cast<int>(stack.size()),
block->rpo_number().ToInt());
- bool fallthru = true;
RpoNumber fw = block->rpo_number();
- for (int i = block->code_start(); i < block->code_end(); ++i) {
- Instruction* instr = code->InstructionAt(i);
- if (!instr->AreMovesRedundant()) {
- // can't skip instructions with non redundant moves.
- TRACE(" parallel move\n");
- fallthru = false;
- } else if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
- // can't skip instructions with flags continuations.
- TRACE(" flags\n");
- fallthru = false;
- } else if (instr->IsNop()) {
- // skip nops.
- TRACE(" nop\n");
- continue;
- } else if (instr->arch_opcode() == kArchJmp) {
- // try to forward the jump instruction.
- TRACE(" jmp\n");
- // if this block deconstructs the frame, we can't forward it.
- // TODO(mtrofin): we can still forward if we end up building
- // the frame at start. So we should move the decision of whether
- // to build a frame or not in the register allocator, and trickle it
- // here and to the code generator.
- if (frame_at_start ||
- !(block->must_deconstruct_frame() ||
- block->must_construct_frame())) {
- fw = code->InputRpo(instr, 0);
+ if (!IsBlockWithBranchPoisoning(code, block)) {
+ bool fallthru = true;
+ for (int i = block->code_start(); i < block->code_end(); ++i) {
+ Instruction* instr = code->InstructionAt(i);
+ if (!instr->AreMovesRedundant()) {
+ // can't skip instructions with non redundant moves.
+ TRACE(" parallel move\n");
+ fallthru = false;
+ } else if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
+ // can't skip instructions with flags continuations.
+ TRACE(" flags\n");
+ fallthru = false;
+ } else if (instr->IsNop()) {
+ // skip nops.
+ TRACE(" nop\n");
+ continue;
+ } else if (instr->arch_opcode() == kArchJmp) {
+ // try to forward the jump instruction.
+ TRACE(" jmp\n");
+ // if this block deconstructs the frame, we can't forward it.
+ // TODO(mtrofin): we can still forward if we end up building
+ // the frame at start. So we should move the decision of whether
+ // to build a frame or not in the register allocator, and trickle it
+ // here and to the code generator.
+ if (frame_at_start || !(block->must_deconstruct_frame() ||
+ block->must_construct_frame())) {
+ fw = code->InputRpo(instr, 0);
+ }
+ fallthru = false;
+ } else {
+ // can't skip other instructions.
+ TRACE(" other\n");
+ fallthru = false;
}
- fallthru = false;
- } else {
- // can't skip other instructions.
- TRACE(" other\n");
- fallthru = false;
+ break;
+ }
+ if (fallthru) {
+ int next = 1 + block->rpo_number().ToInt();
+ if (next < code->InstructionBlockCount())
+ fw = RpoNumber::FromInt(next);
}
- break;
- }
- if (fallthru) {
- int next = 1 + block->rpo_number().ToInt();
- if (next < code->InstructionBlockCount()) fw = RpoNumber::FromInt(next);
}
state.Forward(fw);
}
@@ -155,7 +172,8 @@ void JumpThreading::ApplyForwarding(ZoneVector<RpoNumber>& result,
bool fallthru = true;
for (int i = block->code_start(); i < block->code_end(); ++i) {
Instruction* instr = code->InstructionAt(i);
- if (FlagsModeField::decode(instr->opcode()) == kFlags_branch) {
+ FlagsMode mode = FlagsModeField::decode(instr->opcode());
+ if (mode == kFlags_branch || mode == kFlags_branch_and_poison) {
fallthru = false; // branches don't fall through to the next block.
} else if (instr->arch_opcode() == kArchJmp) {
if (skip[block_num]) {
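ComputeForwarding builds a table that maps every block to the block control should really reach once empty blocks and unconditional jumps are collapsed; the new IsBlockWithBranchPoisoning check simply exempts blocks whose single predecessor ends in a branch-and-poison, since the poison mask has to be computed on the exact block the branch targets. A toy version of the chain collapsing over a plain array of jump targets (hypothetical, and ignoring the on-stack marker the real pass uses to break cycles):

    #include <vector>

    // forward[i] == i means block i does real work; otherwise it names the block
    // that i unconditionally jumps or falls through to. Collapse chains so every
    // entry points at its final destination.
    std::vector<int> CollapseJumpChains(std::vector<int> forward) {
      for (size_t i = 0; i < forward.size(); ++i) {
        size_t target = i;
        // Follow the chain; the real pass bounds this with an on-stack marker.
        while (forward[target] != static_cast<int>(target)) {
          target = static_cast<size_t>(forward[target]);
        }
        forward[i] = static_cast<int>(target);
      }
      return forward;
    }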
diff --git a/deps/v8/src/compiler/jump-threading.h b/deps/v8/src/compiler/jump-threading.h
index 84520ba3ed..3a378d0499 100644
--- a/deps/v8/src/compiler/jump-threading.h
+++ b/deps/v8/src/compiler/jump-threading.h
@@ -30,4 +30,4 @@ class JumpThreading {
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_JUMP_THREADING_H
+#endif // V8_COMPILER_JUMP_THREADING_H_
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 5df50e64f5..7ccad439d9 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -179,10 +179,8 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
return false;
// Some inline intrinsics are also safe to call without a FrameState.
- case Runtime::kInlineClassOf:
case Runtime::kInlineCreateIterResultObject:
case Runtime::kInlineGeneratorClose:
- case Runtime::kInlineGeneratorGetContext:
case Runtime::kInlineGeneratorGetInputOrDebugPos:
case Runtime::kInlineGeneratorGetResumeMode:
case Runtime::kInlineCreateJSGeneratorObject:
@@ -462,6 +460,8 @@ CallDescriptor* Linkage::GetBytecodeDispatchCallDescriptor(
// The target for interpreter dispatches is a code entry address.
MachineType target_type = MachineType::Pointer();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
+ const CallDescriptor::Flags kFlags =
+ CallDescriptor::kCanUseRoots | CallDescriptor::kFixedTargetRegister;
return new (zone) CallDescriptor( // --
CallDescriptor::kCallAddress, // kind
target_type, // target MachineType
@@ -471,7 +471,7 @@ CallDescriptor* Linkage::GetBytecodeDispatchCallDescriptor(
Operator::kNoProperties, // properties
kNoCalleeSaved, // callee-saved registers
kNoCalleeSaved, // callee-saved fp
- CallDescriptor::kCanUseRoots, // flags
+ kFlags, // flags
descriptor.DebugName(isolate));
}
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index ade1d6902f..5b08bc7f7c 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -184,7 +184,10 @@ class V8_EXPORT_PRIVATE CallDescriptor final
// Push argument count as part of function prologue.
kPushArgumentCount = 1u << 5,
// Use retpoline for this call if indirect.
- kRetpoline = 1u << 6
+ kRetpoline = 1u << 6,
+ // Use the kJavaScriptCallCodeStartRegister (fixed) register for the
+ // indirect target address when calling.
+ kFixedTargetRegister = 1u << 7
};
typedef base::Flags<Flag> Flags;
diff --git a/deps/v8/src/compiler/live-range-separator.h b/deps/v8/src/compiler/live-range-separator.h
index 6aaf6b69e6..0d48f25e5d 100644
--- a/deps/v8/src/compiler/live-range-separator.h
+++ b/deps/v8/src/compiler/live-range-separator.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LIVE_RANGE_SEPARATOR_H_
-#define V8_LIVE_RANGE_SEPARATOR_H_
+#ifndef V8_COMPILER_LIVE_RANGE_SEPARATOR_H_
+#define V8_COMPILER_LIVE_RANGE_SEPARATOR_H_
#include "src/zone/zone.h"
namespace v8 {
@@ -61,4 +61,4 @@ class LiveRangeMerger final : public ZoneObject {
} // namespace compiler
} // namespace internal
} // namespace v8
-#endif // V8_LIVE_RANGE_SEPARATOR_H_
+#endif // V8_COMPILER_LIVE_RANGE_SEPARATOR_H_
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index 7888f5a21e..a3b0eda15f 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -821,9 +821,11 @@ Reduction LoadElimination::ReduceMaybeGrowFastElements(Node* node) {
state = state->SetMaps(
node, ZoneHandleSet<Map>(factory()->fixed_double_array_map()), zone());
} else {
- // We know that the resulting elements have the fixed array map.
- state = state->SetMaps(
- node, ZoneHandleSet<Map>(factory()->fixed_array_map()), zone());
+ // We know that the resulting elements have the fixed array map or the COW
+ // version thereof (if we didn't grow and it was already COW before).
+ ZoneHandleSet<Map> fixed_array_maps(factory()->fixed_array_map());
+ fixed_array_maps.insert(factory()->fixed_cow_array_map(), zone());
+ state = state->SetMaps(node, fixed_array_maps, zone());
}
// Kill the previous elements on {object}.
state = state->KillField(object, FieldIndexOf(JSObject::kElementsOffset),
@@ -1344,7 +1346,7 @@ int LoadElimination::FieldIndexOf(FieldAccess const& access) {
if (kDoubleSize != kPointerSize) {
return -1; // We currently only track pointer size fields.
}
- // Fall through.
+ break;
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
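The load-elimination change above is a correctness fix rather than a cleanup: MaybeGrowFastElements may hand back the existing copy-on-write backing store untouched, so the abstract state has to admit both the plain and the COW fixed-array map; recording only fixed_array_map could let a later map check against the COW map be folded away as impossible. The two-map state update, compressed from the hunk above (factory and zone come from the surrounding pass):

    // Sketch: when no copy happened, the grown object's elements keep the COW
    // map, so both maps remain possible in the tracked state.
    ZoneHandleSet<Map> fixed_array_maps(factory()->fixed_array_map());
    fixed_array_maps.insert(factory()->fixed_cow_array_map(), zone());
    state = state->SetMaps(node, fixed_array_maps, zone());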
diff --git a/deps/v8/src/compiler/loop-variable-optimizer.cc b/deps/v8/src/compiler/loop-variable-optimizer.cc
index 1e93de5124..97d712f125 100644
--- a/deps/v8/src/compiler/loop-variable-optimizer.cc
+++ b/deps/v8/src/compiler/loop-variable-optimizer.cc
@@ -29,6 +29,7 @@ LoopVariableOptimizer::LoopVariableOptimizer(Graph* graph,
common_(common),
zone_(zone),
limits_(graph->NodeCount(), zone),
+ reduced_(graph->NodeCount(), zone),
induction_vars_(zone) {}
void LoopVariableOptimizer::Run() {
@@ -40,13 +41,13 @@ void LoopVariableOptimizer::Run() {
queue.pop();
queued.Set(node, false);
- DCHECK_NULL(limits_[node->id()]);
+ DCHECK(!reduced_.Get(node));
bool all_inputs_visited = true;
int inputs_end = (node->opcode() == IrOpcode::kLoop)
? kFirstBackedge
: node->op()->ControlInputCount();
for (int i = 0; i < inputs_end; i++) {
- if (limits_[NodeProperties::GetControlInput(node, i)->id()] == nullptr) {
+ if (!reduced_.Get(NodeProperties::GetControlInput(node, i))) {
all_inputs_visited = false;
break;
}
@@ -54,7 +55,7 @@ void LoopVariableOptimizer::Run() {
if (!all_inputs_visited) continue;
VisitNode(node);
- DCHECK_NOT_NULL(limits_[node->id()]);
+ reduced_.Set(node, true);
// Queue control outputs.
for (Edge edge : node->use_edges()) {
@@ -73,80 +74,6 @@ void LoopVariableOptimizer::Run() {
}
}
-class LoopVariableOptimizer::Constraint : public ZoneObject {
- public:
- InductionVariable::ConstraintKind kind() const { return kind_; }
- Node* left() const { return left_; }
- Node* right() const { return right_; }
-
- const Constraint* next() const { return next_; }
-
- Constraint(Node* left, InductionVariable::ConstraintKind kind, Node* right,
- const Constraint* next)
- : left_(left), right_(right), kind_(kind), next_(next) {}
-
- private:
- Node* left_;
- Node* right_;
- InductionVariable::ConstraintKind kind_;
- const Constraint* next_;
-};
-
-class LoopVariableOptimizer::VariableLimits : public ZoneObject {
- public:
- static VariableLimits* Empty(Zone* zone) {
- return new (zone) VariableLimits();
- }
-
- VariableLimits* Copy(Zone* zone) const {
- return new (zone) VariableLimits(this);
- }
-
- void Add(Node* left, InductionVariable::ConstraintKind kind, Node* right,
- Zone* zone) {
- head_ = new (zone) Constraint(left, kind, right, head_);
- limit_count_++;
- }
-
- void Merge(const VariableLimits* other) {
- // Change the current condition list to a longest common tail
- // of this condition list and the other list. (The common tail
- // should correspond to the list from the common dominator.)
-
- // First, we throw away the prefix of the longer list, so that
- // we have lists of the same length.
- size_t other_size = other->limit_count_;
- const Constraint* other_limit = other->head_;
- while (other_size > limit_count_) {
- other_limit = other_limit->next();
- other_size--;
- }
- while (limit_count_ > other_size) {
- head_ = head_->next();
- limit_count_--;
- }
-
- // Then we go through both lists in lock-step until we find
- // the common tail.
- while (head_ != other_limit) {
- DCHECK_LT(0, limit_count_);
- limit_count_--;
- other_limit = other_limit->next();
- head_ = head_->next();
- }
- }
-
- const Constraint* head() const { return head_; }
-
- private:
- VariableLimits() {}
- explicit VariableLimits(const VariableLimits* other)
- : head_(other->head_), limit_count_(other->limit_count_) {}
-
- const Constraint* head_ = nullptr;
- size_t limit_count_ = 0;
-};
-
void InductionVariable::AddUpperBound(Node* bound,
InductionVariable::ConstraintKind kind) {
if (FLAG_trace_turbo_loop) {
@@ -173,21 +100,19 @@ void LoopVariableOptimizer::VisitBackedge(Node* from, Node* loop) {
// Go through the constraints, and update the induction variables in
// this loop if they are involved in the constraint.
- const VariableLimits* limits = limits_[from->id()];
- for (const Constraint* constraint = limits->head(); constraint != nullptr;
- constraint = constraint->next()) {
- if (constraint->left()->opcode() == IrOpcode::kPhi &&
- NodeProperties::GetControlInput(constraint->left()) == loop) {
- auto var = induction_vars_.find(constraint->left()->id());
+ for (Constraint constraint : limits_.Get(from)) {
+ if (constraint.left->opcode() == IrOpcode::kPhi &&
+ NodeProperties::GetControlInput(constraint.left) == loop) {
+ auto var = induction_vars_.find(constraint.left->id());
if (var != induction_vars_.end()) {
- var->second->AddUpperBound(constraint->right(), constraint->kind());
+ var->second->AddUpperBound(constraint.right, constraint.kind);
}
}
- if (constraint->right()->opcode() == IrOpcode::kPhi &&
- NodeProperties::GetControlInput(constraint->right()) == loop) {
- auto var = induction_vars_.find(constraint->right()->id());
+ if (constraint.right->opcode() == IrOpcode::kPhi &&
+ NodeProperties::GetControlInput(constraint.right) == loop) {
+ auto var = induction_vars_.find(constraint.right->id());
if (var != induction_vars_.end()) {
- var->second->AddLowerBound(constraint->left(), constraint->kind());
+ var->second->AddLowerBound(constraint.left, constraint.kind);
}
}
}
@@ -214,11 +139,11 @@ void LoopVariableOptimizer::VisitNode(Node* node) {
void LoopVariableOptimizer::VisitMerge(Node* node) {
// Merge the limits of all incoming edges.
- VariableLimits* merged = limits_[node->InputAt(0)->id()]->Copy(zone());
+ VariableLimits merged = limits_.Get(node->InputAt(0));
for (int i = 1; i < node->InputCount(); i++) {
- merged->Merge(limits_[node->InputAt(i)->id()]);
+ merged.ResetToCommonAncestor(limits_.Get(node->InputAt(i)));
}
- limits_[node->id()] = merged;
+ limits_.Set(node, merged);
}
void LoopVariableOptimizer::VisitLoop(Node* node) {
@@ -230,27 +155,27 @@ void LoopVariableOptimizer::VisitLoop(Node* node) {
void LoopVariableOptimizer::VisitIf(Node* node, bool polarity) {
Node* branch = node->InputAt(0);
Node* cond = branch->InputAt(0);
- VariableLimits* limits = limits_[branch->id()]->Copy(zone());
+ VariableLimits limits = limits_.Get(branch);
// Normalize to less than comparison.
switch (cond->opcode()) {
case IrOpcode::kJSLessThan:
case IrOpcode::kSpeculativeNumberLessThan:
- AddCmpToLimits(limits, cond, InductionVariable::kStrict, polarity);
+ AddCmpToLimits(&limits, cond, InductionVariable::kStrict, polarity);
break;
case IrOpcode::kJSGreaterThan:
- AddCmpToLimits(limits, cond, InductionVariable::kNonStrict, !polarity);
+ AddCmpToLimits(&limits, cond, InductionVariable::kNonStrict, !polarity);
break;
case IrOpcode::kJSLessThanOrEqual:
case IrOpcode::kSpeculativeNumberLessThanOrEqual:
- AddCmpToLimits(limits, cond, InductionVariable::kNonStrict, polarity);
+ AddCmpToLimits(&limits, cond, InductionVariable::kNonStrict, polarity);
break;
case IrOpcode::kJSGreaterThanOrEqual:
- AddCmpToLimits(limits, cond, InductionVariable::kStrict, !polarity);
+ AddCmpToLimits(&limits, cond, InductionVariable::kStrict, !polarity);
break;
default:
break;
}
- limits_[node->id()] = limits;
+ limits_.Set(node, limits);
}
void LoopVariableOptimizer::AddCmpToLimits(
@@ -260,19 +185,17 @@ void LoopVariableOptimizer::AddCmpToLimits(
Node* right = node->InputAt(1);
if (FindInductionVariable(left) || FindInductionVariable(right)) {
if (polarity) {
- limits->Add(left, kind, right, zone());
+ limits->PushFront(Constraint{left, kind, right}, zone());
} else {
kind = (kind == InductionVariable::kStrict)
? InductionVariable::kNonStrict
: InductionVariable::kStrict;
- limits->Add(right, kind, left, zone());
+ limits->PushFront(Constraint{right, kind, left}, zone());
}
}
}
-void LoopVariableOptimizer::VisitStart(Node* node) {
- limits_[node->id()] = VariableLimits::Empty(zone());
-}
+void LoopVariableOptimizer::VisitStart(Node* node) { limits_.Set(node, {}); }
void LoopVariableOptimizer::VisitLoopExit(Node* node) {
return TakeConditionsFromFirstControl(node);
@@ -284,10 +207,7 @@ void LoopVariableOptimizer::VisitOtherControl(Node* node) {
}
void LoopVariableOptimizer::TakeConditionsFromFirstControl(Node* node) {
- const VariableLimits* limits =
- limits_[NodeProperties::GetControlInput(node, 0)->id()];
- DCHECK_NOT_NULL(limits);
- limits_[node->id()] = limits;
+ limits_.Set(node, limits_.Get(NodeProperties::GetControlInput(node, 0)));
}
const InductionVariable* LoopVariableOptimizer::FindInductionVariable(
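The VariableLimits rewrite swaps a hand-rolled persistent constraint list for FunctionalList<Constraint>; the removed Merge above and the ResetToCommonAncestor it is replaced with implement the same idea, trimming two immutable lists back to their longest common tail so that a merge point only keeps constraints established before control split. A stand-alone sketch of that operation on a simple shared-tail list (hypothetical cell type, not V8's FunctionalList):

    #include <cstddef>

    template <typename T>
    struct PNode {           // persistent list cell: never mutated, only shared
      T value;
      const PNode* next;
      size_t size;           // length of the list starting at this cell
    };

    // Drop elements from both heads until the remaining cells are shared; the
    // survivor is the constraint set valid on every path into the merge.
    template <typename T>
    const PNode<T>* CommonTail(const PNode<T>* a, const PNode<T>* b) {
      size_t la = a ? a->size : 0;
      size_t lb = b ? b->size : 0;
      while (la > lb) { a = a->next; --la; }
      while (lb > la) { b = b->next; --lb; }
      while (a != b) {       // equal lengths now; advance in lock-step
        a = a->next;
        b = b->next;
      }
      return a;              // may be nullptr if nothing is common
    }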
diff --git a/deps/v8/src/compiler/loop-variable-optimizer.h b/deps/v8/src/compiler/loop-variable-optimizer.h
index 9eec614070..8e1d4bfebe 100644
--- a/deps/v8/src/compiler/loop-variable-optimizer.h
+++ b/deps/v8/src/compiler/loop-variable-optimizer.h
@@ -5,6 +5,8 @@
#ifndef V8_COMPILER_LOOP_VARIABLE_OPTIMIZER_H_
#define V8_COMPILER_LOOP_VARIABLE_OPTIMIZER_H_
+#include "src/compiler/functional-list.h"
+#include "src/compiler/node-aux-data.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -82,8 +84,17 @@ class LoopVariableOptimizer {
const int kAssumedLoopEntryIndex = 0;
const int kFirstBackedge = 1;
- class Constraint;
- class VariableLimits;
+ struct Constraint {
+ Node* left;
+ InductionVariable::ConstraintKind kind;
+ Node* right;
+
+ bool operator!=(const Constraint& other) const {
+ return left != other.left || kind != other.kind || right != other.right;
+ }
+ };
+
+ using VariableLimits = FunctionalList<Constraint>;
void VisitBackedge(Node* from, Node* loop);
void VisitNode(Node* node);
@@ -109,7 +120,9 @@ class LoopVariableOptimizer {
Graph* graph_;
CommonOperatorBuilder* common_;
Zone* zone_;
- ZoneVector<const VariableLimits*> limits_;
+ NodeAuxData<VariableLimits> limits_;
+ NodeAuxData<bool> reduced_;
+
ZoneMap<int, InductionVariable*> induction_vars_;
};
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index 43f1518461..0c59453b41 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -62,8 +62,8 @@ class MachineRepresentationInferrer {
: MachineRepresentation::kBit;
case IrOpcode::kCall:
case IrOpcode::kCallWithCallerSavedRegisters: {
- CallDescriptor const* desc = CallDescriptorOf(input->op());
- return desc->GetReturnType(index).representation();
+ auto call_descriptor = CallDescriptorOf(input->op());
+ return call_descriptor->GetReturnType(index).representation();
}
default:
return MachineRepresentation::kNone;
@@ -110,7 +110,7 @@ class MachineRepresentationInferrer {
case IrOpcode::kTypedStateValues:
representation_vector_[node->id()] = MachineRepresentation::kNone;
break;
- case IrOpcode::kAtomicLoad:
+ case IrOpcode::kWord32AtomicLoad:
case IrOpcode::kLoad:
case IrOpcode::kProtectedLoad:
representation_vector_[node->id()] = PromoteRepresentation(
@@ -119,6 +119,7 @@ class MachineRepresentationInferrer {
case IrOpcode::kLoadStackPointer:
case IrOpcode::kLoadFramePointer:
case IrOpcode::kLoadParentFramePointer:
+ case IrOpcode::kSpeculationPoison:
representation_vector_[node->id()] =
MachineType::PointerRepresentation();
break;
@@ -132,27 +133,27 @@ class MachineRepresentationInferrer {
break;
case IrOpcode::kCall:
case IrOpcode::kCallWithCallerSavedRegisters: {
- CallDescriptor const* desc = CallDescriptorOf(node->op());
- if (desc->ReturnCount() > 0) {
+ auto call_descriptor = CallDescriptorOf(node->op());
+ if (call_descriptor->ReturnCount() > 0) {
representation_vector_[node->id()] =
- desc->GetReturnType(0).representation();
+ call_descriptor->GetReturnType(0).representation();
} else {
representation_vector_[node->id()] =
MachineRepresentation::kTagged;
}
break;
}
- case IrOpcode::kAtomicStore:
+ case IrOpcode::kWord32AtomicStore:
representation_vector_[node->id()] =
PromoteRepresentation(AtomicStoreRepresentationOf(node->op()));
break;
- case IrOpcode::kAtomicExchange:
- case IrOpcode::kAtomicCompareExchange:
- case IrOpcode::kAtomicAdd:
- case IrOpcode::kAtomicSub:
- case IrOpcode::kAtomicAnd:
- case IrOpcode::kAtomicOr:
- case IrOpcode::kAtomicXor:
+ case IrOpcode::kWord32AtomicExchange:
+ case IrOpcode::kWord32AtomicCompareExchange:
+ case IrOpcode::kWord32AtomicAdd:
+ case IrOpcode::kWord32AtomicSub:
+ case IrOpcode::kWord32AtomicAnd:
+ case IrOpcode::kWord32AtomicOr:
+ case IrOpcode::kWord32AtomicXor:
representation_vector_[node->id()] = PromoteRepresentation(
AtomicOpRepresentationOf(node->op()).representation());
break;
@@ -459,19 +460,19 @@ class MachineRepresentationChecker {
CheckValueInputIsTagged(node, 0);
break;
case IrOpcode::kLoad:
- case IrOpcode::kAtomicLoad:
+ case IrOpcode::kWord32AtomicLoad:
CheckValueInputIsTaggedOrPointer(node, 0);
CheckValueInputRepresentationIs(
node, 1, MachineType::PointerRepresentation());
break;
case IrOpcode::kStore:
- case IrOpcode::kAtomicStore:
- case IrOpcode::kAtomicExchange:
- case IrOpcode::kAtomicAdd:
- case IrOpcode::kAtomicSub:
- case IrOpcode::kAtomicAnd:
- case IrOpcode::kAtomicOr:
- case IrOpcode::kAtomicXor:
+ case IrOpcode::kWord32AtomicStore:
+ case IrOpcode::kWord32AtomicExchange:
+ case IrOpcode::kWord32AtomicAdd:
+ case IrOpcode::kWord32AtomicSub:
+ case IrOpcode::kWord32AtomicAnd:
+ case IrOpcode::kWord32AtomicOr:
+ case IrOpcode::kWord32AtomicXor:
CheckValueInputIsTaggedOrPointer(node, 0);
CheckValueInputRepresentationIs(
node, 1, MachineType::PointerRepresentation());
@@ -486,7 +487,7 @@ class MachineRepresentationChecker {
node, 2, inferrer_->GetRepresentation(node));
}
break;
- case IrOpcode::kAtomicCompareExchange:
+ case IrOpcode::kWord32AtomicCompareExchange:
CheckValueInputIsTaggedOrPointer(node, 0);
CheckValueInputRepresentationIs(
node, 1, MachineType::PointerRepresentation());
@@ -737,15 +738,15 @@ class MachineRepresentationChecker {
}
void CheckCallInputs(Node const* node) {
- CallDescriptor const* desc = CallDescriptorOf(node->op());
+ auto call_descriptor = CallDescriptorOf(node->op());
std::ostringstream str;
bool should_log_error = false;
- for (size_t i = 0; i < desc->InputCount(); ++i) {
+ for (size_t i = 0; i < call_descriptor->InputCount(); ++i) {
Node const* input = node->InputAt(static_cast<int>(i));
MachineRepresentation const input_type =
inferrer_->GetRepresentation(input);
MachineRepresentation const expected_input_type =
- desc->GetInputType(i).representation();
+ call_descriptor->GetInputType(i).representation();
if (!IsCompatible(expected_input_type, input_type)) {
if (!should_log_error) {
should_log_error = true;
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 97c83b1b82..1fcfa52e51 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -352,10 +352,6 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kFloat64Add: {
Float64BinopMatcher m(node);
- if (m.right().IsNaN()) { // x + NaN => NaN
- // Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat64(m.right().Value() - m.right().Value());
- }
if (m.IsFoldable()) { // K + K => K
return ReplaceFloat64(m.left().Value() + m.right().Value());
}
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 66178308be..c091146f1d 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -37,7 +37,8 @@ std::ostream& operator<<(std::ostream& os, StoreRepresentation rep) {
LoadRepresentation LoadRepresentationOf(Operator const* op) {
DCHECK(IrOpcode::kLoad == op->opcode() ||
IrOpcode::kProtectedLoad == op->opcode() ||
- IrOpcode::kAtomicLoad == op->opcode());
+ IrOpcode::kWord32AtomicLoad == op->opcode() ||
+ IrOpcode::kPoisonedLoad == op->opcode());
return OpParameter<LoadRepresentation>(op);
}
@@ -81,7 +82,7 @@ StackSlotRepresentation const& StackSlotRepresentationOf(Operator const* op) {
}
MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
- DCHECK_EQ(IrOpcode::kAtomicStore, op->opcode());
+ DCHECK_EQ(IrOpcode::kWord32AtomicStore, op->opcode());
return OpParameter<MachineRepresentation>(op);
}
@@ -169,6 +170,11 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(BitcastFloat64ToInt64, Operator::kNoProperties, 1, 0, 1) \
V(BitcastInt32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
V(BitcastInt64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(SignExtendWord8ToInt32, Operator::kNoProperties, 1, 0, 1) \
+ V(SignExtendWord16ToInt32, Operator::kNoProperties, 1, 0, 1) \
+ V(SignExtendWord8ToInt64, Operator::kNoProperties, 1, 0, 1) \
+ V(SignExtendWord16ToInt64, Operator::kNoProperties, 1, 0, 1) \
+ V(SignExtendWord32ToInt64, Operator::kNoProperties, 1, 0, 1) \
V(Float32Abs, Operator::kNoProperties, 1, 0, 1) \
V(Float32Add, Operator::kCommutative, 2, 0, 1) \
V(Float32Sub, Operator::kNoProperties, 2, 0, 1) \
@@ -219,6 +225,7 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(Float64ExtractHighWord32, Operator::kNoProperties, 1, 0, 1) \
V(Float64InsertLowWord32, Operator::kNoProperties, 2, 0, 1) \
V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1) \
+ V(SpeculationPoison, Operator::kNoProperties, 0, 0, 1) \
V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1) \
V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1) \
V(LoadParentFramePointer, Operator::kNoProperties, 0, 0, 1) \
@@ -454,6 +461,14 @@ struct MachineOperatorGlobalCache {
Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
"Load", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
}; \
+ struct PoisonedLoad##Type##Operator final \
+ : public Operator1<LoadRepresentation> { \
+ PoisonedLoad##Type##Operator() \
+ : Operator1<LoadRepresentation>( \
+ IrOpcode::kPoisonedLoad, \
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
+ "PoisonedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
struct UnalignedLoad##Type##Operator final \
: public Operator1<UnalignedLoadRepresentation> { \
UnalignedLoad##Type##Operator() \
@@ -471,6 +486,7 @@ struct MachineOperatorGlobalCache {
1, 1, 1, 0, MachineType::Type()) {} \
}; \
Load##Type##Operator kLoad##Type; \
+ PoisonedLoad##Type##Operator kPoisonedLoad##Type; \
UnalignedLoad##Type##Operator kUnalignedLoad##Type; \
ProtectedLoad##Type##Operator kProtectedLoad##Type;
MACHINE_TYPE_LIST(LOAD)
@@ -547,30 +563,31 @@ struct MachineOperatorGlobalCache {
#undef STORE
#define ATOMIC_LOAD(Type) \
- struct AtomicLoad##Type##Operator final \
+ struct Word32AtomicLoad##Type##Operator final \
: public Operator1<LoadRepresentation> { \
- AtomicLoad##Type##Operator() \
+ Word32AtomicLoad##Type##Operator() \
: Operator1<LoadRepresentation>( \
- IrOpcode::kAtomicLoad, \
+ IrOpcode::kWord32AtomicLoad, \
Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
- "AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ "Word32AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
}; \
- AtomicLoad##Type##Operator kAtomicLoad##Type;
+ Word32AtomicLoad##Type##Operator kWord32AtomicLoad##Type;
ATOMIC_TYPE_LIST(ATOMIC_LOAD)
#undef ATOMIC_LOAD
-#define ATOMIC_STORE(Type) \
- struct AtomicStore##Type##Operator \
- : public Operator1<MachineRepresentation> { \
- AtomicStore##Type##Operator() \
- : Operator1<MachineRepresentation>( \
- IrOpcode::kAtomicStore, \
- Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
- "AtomicStore", 3, 1, 1, 0, 1, 0, MachineRepresentation::Type) {} \
- }; \
- AtomicStore##Type##Operator kAtomicStore##Type;
+#define ATOMIC_STORE(Type) \
+ struct Word32AtomicStore##Type##Operator \
+ : public Operator1<MachineRepresentation> { \
+ Word32AtomicStore##Type##Operator() \
+ : Operator1<MachineRepresentation>( \
+ IrOpcode::kWord32AtomicStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "Word32AtomicStore", 3, 1, 1, 0, 1, 0, \
+ MachineRepresentation::Type) {} \
+ }; \
+ Word32AtomicStore##Type##Operator kWord32AtomicStore##Type;
ATOMIC_REPRESENTATION_LIST(ATOMIC_STORE)
-#undef STORE
+#undef ATOMIC_STORE
#define ATOMIC_OP(op, type) \
struct op##type##Operator : public Operator1<MachineType> { \
@@ -580,27 +597,28 @@ struct MachineOperatorGlobalCache {
3, 1, 1, 1, 1, 0, MachineType::type()) {} \
}; \
op##type##Operator k##op##type;
-#define ATOMIC_OP_LIST(type) \
- ATOMIC_OP(AtomicExchange, type) \
- ATOMIC_OP(AtomicAdd, type) \
- ATOMIC_OP(AtomicSub, type) \
- ATOMIC_OP(AtomicAnd, type) \
- ATOMIC_OP(AtomicOr, type) \
- ATOMIC_OP(AtomicXor, type)
+#define ATOMIC_OP_LIST(type) \
+ ATOMIC_OP(Word32AtomicExchange, type) \
+ ATOMIC_OP(Word32AtomicAdd, type) \
+ ATOMIC_OP(Word32AtomicSub, type) \
+ ATOMIC_OP(Word32AtomicAnd, type) \
+ ATOMIC_OP(Word32AtomicOr, type) \
+ ATOMIC_OP(Word32AtomicXor, type)
ATOMIC_TYPE_LIST(ATOMIC_OP_LIST)
#undef ATOMIC_OP_LIST
#undef ATOMIC_OP
-#define ATOMIC_COMPARE_EXCHANGE(Type) \
- struct AtomicCompareExchange##Type##Operator \
- : public Operator1<MachineType> { \
- AtomicCompareExchange##Type##Operator() \
- : Operator1<MachineType>(IrOpcode::kAtomicCompareExchange, \
- Operator::kNoDeopt | Operator::kNoThrow, \
- "AtomicCompareExchange", 4, 1, 1, 1, 1, 0, \
- MachineType::Type()) {} \
- }; \
- AtomicCompareExchange##Type##Operator kAtomicCompareExchange##Type;
+#define ATOMIC_COMPARE_EXCHANGE(Type) \
+ struct Word32AtomicCompareExchange##Type##Operator \
+ : public Operator1<MachineType> { \
+ Word32AtomicCompareExchange##Type##Operator() \
+ : Operator1<MachineType>(IrOpcode::kWord32AtomicCompareExchange, \
+ Operator::kNoDeopt | Operator::kNoThrow, \
+ "Word32AtomicCompareExchange", 4, 1, 1, 1, 1, \
+ 0, MachineType::Type()) {} \
+ }; \
+ Word32AtomicCompareExchange##Type##Operator \
+ kWord32AtomicCompareExchange##Type;
ATOMIC_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
#undef ATOMIC_COMPARE_EXCHANGE
@@ -730,6 +748,16 @@ const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
UNREACHABLE();
}
+const Operator* MachineOperatorBuilder::PoisonedLoad(LoadRepresentation rep) {
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return &cache_.kPoisonedLoad##Type; \
+ }
+ MACHINE_TYPE_LIST(LOAD)
+#undef LOAD
+ UNREACHABLE();
+}
+
const Operator* MachineOperatorBuilder::ProtectedLoad(LoadRepresentation rep) {
#define LOAD(Type) \
if (rep == MachineType::Type()) { \
@@ -823,90 +851,93 @@ const Operator* MachineOperatorBuilder::Comment(const char* msg) {
return new (zone_) CommentOperator(msg);
}
-const Operator* MachineOperatorBuilder::AtomicLoad(LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return &cache_.kAtomicLoad##Type; \
+const Operator* MachineOperatorBuilder::Word32AtomicLoad(
+ LoadRepresentation rep) {
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return &cache_.kWord32AtomicLoad##Type; \
}
ATOMIC_TYPE_LIST(LOAD)
#undef LOAD
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::AtomicStore(MachineRepresentation rep) {
-#define STORE(kRep) \
- if (rep == MachineRepresentation::kRep) { \
- return &cache_.kAtomicStore##kRep; \
+const Operator* MachineOperatorBuilder::Word32AtomicStore(
+ MachineRepresentation rep) {
+#define STORE(kRep) \
+ if (rep == MachineRepresentation::kRep) { \
+ return &cache_.kWord32AtomicStore##kRep; \
}
ATOMIC_REPRESENTATION_LIST(STORE)
#undef STORE
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::AtomicExchange(MachineType rep) {
-#define EXCHANGE(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kAtomicExchange##kRep; \
+const Operator* MachineOperatorBuilder::Word32AtomicExchange(MachineType rep) {
+#define EXCHANGE(kRep) \
+ if (rep == MachineType::kRep()) { \
+ return &cache_.kWord32AtomicExchange##kRep; \
}
ATOMIC_TYPE_LIST(EXCHANGE)
#undef EXCHANGE
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::AtomicCompareExchange(MachineType rep) {
-#define COMPARE_EXCHANGE(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kAtomicCompareExchange##kRep; \
+const Operator* MachineOperatorBuilder::Word32AtomicCompareExchange(
+ MachineType rep) {
+#define COMPARE_EXCHANGE(kRep) \
+ if (rep == MachineType::kRep()) { \
+ return &cache_.kWord32AtomicCompareExchange##kRep; \
}
ATOMIC_TYPE_LIST(COMPARE_EXCHANGE)
#undef COMPARE_EXCHANGE
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::AtomicAdd(MachineType rep) {
-#define ADD(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kAtomicAdd##kRep; \
+const Operator* MachineOperatorBuilder::Word32AtomicAdd(MachineType rep) {
+#define ADD(kRep) \
+ if (rep == MachineType::kRep()) { \
+ return &cache_.kWord32AtomicAdd##kRep; \
}
ATOMIC_TYPE_LIST(ADD)
#undef ADD
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::AtomicSub(MachineType rep) {
-#define SUB(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kAtomicSub##kRep; \
+const Operator* MachineOperatorBuilder::Word32AtomicSub(MachineType rep) {
+#define SUB(kRep) \
+ if (rep == MachineType::kRep()) { \
+ return &cache_.kWord32AtomicSub##kRep; \
}
ATOMIC_TYPE_LIST(SUB)
#undef SUB
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::AtomicAnd(MachineType rep) {
-#define AND(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kAtomicAnd##kRep; \
+const Operator* MachineOperatorBuilder::Word32AtomicAnd(MachineType rep) {
+#define AND(kRep) \
+ if (rep == MachineType::kRep()) { \
+ return &cache_.kWord32AtomicAnd##kRep; \
}
ATOMIC_TYPE_LIST(AND)
#undef AND
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::AtomicOr(MachineType rep) {
-#define OR(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kAtomicOr##kRep; \
+const Operator* MachineOperatorBuilder::Word32AtomicOr(MachineType rep) {
+#define OR(kRep) \
+ if (rep == MachineType::kRep()) { \
+ return &cache_.kWord32AtomicOr##kRep; \
}
ATOMIC_TYPE_LIST(OR)
#undef OR
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::AtomicXor(MachineType rep) {
-#define XOR(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kAtomicXor##kRep; \
+const Operator* MachineOperatorBuilder::Word32AtomicXor(MachineType rep) {
+#define XOR(kRep) \
+ if (rep == MachineType::kRep()) { \
+ return &cache_.kWord32AtomicXor##kRep; \
}
ATOMIC_TYPE_LIST(XOR)
#undef XOR
@@ -967,6 +998,19 @@ const Operator* MachineOperatorBuilder::S8x16Shuffle(
2, 0, 0, 1, 0, 0, array);
}
+#undef PURE_BINARY_OP_LIST_32
+#undef PURE_BINARY_OP_LIST_64
+#undef MACHINE_PURE_OP_LIST
+#undef PURE_OPTIONAL_OP_LIST
+#undef OVERFLOW_OP_LIST
+#undef MACHINE_TYPE_LIST
+#undef MACHINE_REPRESENTATION_LIST
+#undef ATOMIC_TYPE_LIST
+#undef ATOMIC_REPRESENTATION_LIST
+#undef SIMD_LANE_OP_LIST
+#undef SIMD_FORMAT_LIST
+#undef STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST
+
} // namespace compiler
} // namespace internal
} // namespace v8
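The renamed atomic operators all come out of MachineOperatorGlobalCache, where the macros above expand to one statically constructed Operator per machine type; the builder methods then hand out pointers into that cache so operators can be compared by identity. One expansion spelled out by hand, roughly what ATOMIC_LOAD(Int8) produces after the rename (cache_ and ATOMIC_TYPE_LIST belong to the surrounding file):

    struct Word32AtomicLoadInt8Operator final
        : public Operator1<LoadRepresentation> {
      Word32AtomicLoadInt8Operator()
          : Operator1<LoadRepresentation>(
                IrOpcode::kWord32AtomicLoad,
                Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,
                "Word32AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Int8()) {}
    };
    Word32AtomicLoadInt8Operator kWord32AtomicLoadInt8;

    const Operator* MachineOperatorBuilder::Word32AtomicLoad(
        LoadRepresentation rep) {
      if (rep == MachineType::Int8()) return &cache_.kWord32AtomicLoadInt8;
      // ... one test per entry in ATOMIC_TYPE_LIST ...
      UNREACHABLE();
    }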
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 10b4b15701..2cc1829116 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -350,6 +350,13 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* BitcastInt32ToFloat32();
const Operator* BitcastInt64ToFloat64();
+ // These operators sign-extend to Int32/Int64
+ const Operator* SignExtendWord8ToInt32();
+ const Operator* SignExtendWord16ToInt32();
+ const Operator* SignExtendWord8ToInt64();
+ const Operator* SignExtendWord16ToInt64();
+ const Operator* SignExtendWord32ToInt64();
+
// Floating point operators always operate with IEEE 754 round-to-nearest
// (single-precision).
const Operator* Float32Add();
@@ -577,6 +584,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// load [base + index]
const Operator* Load(LoadRepresentation rep);
+ const Operator* PoisonedLoad(LoadRepresentation rep);
const Operator* ProtectedLoad(LoadRepresentation rep);
// store [base + index], value
@@ -592,29 +600,33 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* StackSlot(int size, int alignment = 0);
const Operator* StackSlot(MachineRepresentation rep, int alignment = 0);
+ // Returns a value which can be used as a mask to poison values when executing
+ // speculatively.
+ const Operator* SpeculationPoison();
+
// Access to the machine stack.
const Operator* LoadStackPointer();
const Operator* LoadFramePointer();
const Operator* LoadParentFramePointer();
// atomic-load [base + index]
- const Operator* AtomicLoad(LoadRepresentation rep);
+ const Operator* Word32AtomicLoad(LoadRepresentation rep);
// atomic-store [base + index], value
- const Operator* AtomicStore(MachineRepresentation rep);
+ const Operator* Word32AtomicStore(MachineRepresentation rep);
// atomic-exchange [base + index], value
- const Operator* AtomicExchange(MachineType rep);
+ const Operator* Word32AtomicExchange(MachineType rep);
// atomic-compare-exchange [base + index], old_value, new_value
- const Operator* AtomicCompareExchange(MachineType rep);
+ const Operator* Word32AtomicCompareExchange(MachineType rep);
// atomic-add [base + index], value
- const Operator* AtomicAdd(MachineType rep);
+ const Operator* Word32AtomicAdd(MachineType rep);
// atomic-sub [base + index], value
- const Operator* AtomicSub(MachineType rep);
+ const Operator* Word32AtomicSub(MachineType rep);
// atomic-and [base + index], value
- const Operator* AtomicAnd(MachineType rep);
+ const Operator* Word32AtomicAnd(MachineType rep);
// atomic-or [base + index], value
- const Operator* AtomicOr(MachineType rep);
+ const Operator* Word32AtomicOr(MachineType rep);
// atomic-xor [base + index], value
- const Operator* AtomicXor(MachineType rep);
+ const Operator* Word32AtomicXor(MachineType rep);
const OptionalOperator SpeculationFence();
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index 596204e214..95418c4a81 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -15,13 +15,15 @@ namespace v8 {
namespace internal {
namespace compiler {
-MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone)
+MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
+ LoadPoisoning load_poisoning)
: jsgraph_(jsgraph),
empty_state_(AllocationState::Empty(zone)),
pending_(zone),
tokens_(zone),
zone_(zone),
- graph_assembler_(jsgraph, nullptr, nullptr, zone) {}
+ graph_assembler_(jsgraph, nullptr, nullptr, zone),
+ load_poisoning_(load_poisoning) {}
void MemoryOptimizer::Optimize() {
EnqueueUses(graph()->start(), empty_state());
@@ -229,9 +231,9 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
: __
AllocateInOldSpaceStubConstant();
if (!allocate_operator_.is_set()) {
- CallDescriptor* descriptor =
+ auto call_descriptor =
Linkage::GetAllocateCallDescriptor(graph()->zone());
- allocate_operator_.set(common()->Call(descriptor));
+ allocate_operator_.set(common()->Call(call_descriptor));
}
Node* vfalse = __ Call(allocate_operator_.get(), target, size);
vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
@@ -284,9 +286,9 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
: __
AllocateInOldSpaceStubConstant();
if (!allocate_operator_.is_set()) {
- CallDescriptor* descriptor =
+ auto call_descriptor =
Linkage::GetAllocateCallDescriptor(graph()->zone());
- allocate_operator_.set(common()->Call(descriptor));
+ allocate_operator_.set(common()->Call(call_descriptor));
}
__ Goto(&done, __ Call(allocate_operator_.get(), target, size));
@@ -348,7 +350,14 @@ void MemoryOptimizer::VisitLoadElement(Node* node,
ElementAccess const& access = ElementAccessOf(node->op());
Node* index = node->InputAt(1);
node->ReplaceInput(1, ComputeIndex(access, index));
- NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+ if (load_poisoning_ == LoadPoisoning::kDoPoison &&
+ access.machine_type.representation() !=
+ MachineRepresentation::kTaggedPointer) {
+ NodeProperties::ChangeOp(node,
+ machine()->PoisonedLoad(access.machine_type));
+ } else {
+ NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+ }
EnqueueUses(node, state);
}
@@ -357,7 +366,14 @@ void MemoryOptimizer::VisitLoadField(Node* node, AllocationState const* state) {
FieldAccess const& access = FieldAccessOf(node->op());
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset);
- NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+ if (load_poisoning_ == LoadPoisoning::kDoPoison &&
+ access.machine_type.representation() !=
+ MachineRepresentation::kTaggedPointer) {
+ NodeProperties::ChangeOp(node,
+ machine()->PoisonedLoad(access.machine_type));
+ } else {
+ NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+ }
EnqueueUses(node, state);
}
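With LoadPoisoning threaded into the MemoryOptimizer, field and element loads are switched to PoisonedLoad unless the loaded value has kTaggedPointer representation, which the change leaves as a plain Load. The decision itself is a two-line predicate; here it is pulled out of VisitLoadField/VisitLoadElement for clarity (a paraphrase of the hunks above, not a separate helper in the source):

    // Sketch: pick the (possibly poisoned) load operator for a field or
    // element access, mirroring VisitLoadField / VisitLoadElement above.
    const Operator* SelectLoadOp(MachineOperatorBuilder* machine,
                                 MachineType machine_type,
                                 LoadPoisoning load_poisoning) {
      bool poison =
          load_poisoning == LoadPoisoning::kDoPoison &&
          machine_type.representation() != MachineRepresentation::kTaggedPointer;
      return poison ? machine->PoisonedLoad(machine_type)
                    : machine->Load(machine_type);
    }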
diff --git a/deps/v8/src/compiler/memory-optimizer.h b/deps/v8/src/compiler/memory-optimizer.h
index e229f2b0be..38643ea8a3 100644
--- a/deps/v8/src/compiler/memory-optimizer.h
+++ b/deps/v8/src/compiler/memory-optimizer.h
@@ -31,7 +31,7 @@ typedef uint32_t NodeId;
// implicitly.
class MemoryOptimizer final {
public:
- MemoryOptimizer(JSGraph* jsgraph, Zone* zone);
+ MemoryOptimizer(JSGraph* jsgraph, Zone* zone, LoadPoisoning load_poisoning);
~MemoryOptimizer() {}
void Optimize();
@@ -142,6 +142,7 @@ class MemoryOptimizer final {
ZoneQueue<Token> tokens_;
Zone* const zone_;
GraphAssembler graph_assembler_;
+ LoadPoisoning load_poisoning_;
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryOptimizer);
};
diff --git a/deps/v8/src/compiler/mips/OWNERS b/deps/v8/src/compiler/mips/OWNERS
index 978563cab5..4ce9d7f91d 100644
--- a/deps/v8/src/compiler/mips/OWNERS
+++ b/deps/v8/src/compiler/mips/OWNERS
@@ -1,2 +1,3 @@
ivica.bogosavljevic@mips.com
Miran.Karic@mips.com
+sreten.kovacevic@mips.com
\ No newline at end of file
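diff note: GenerateSpeculationPoison in the MIPS code generator below computes its mask with branch-free arithmetic on purpose: there is no conditional the CPU could mispredict. Working the formula through both cases (kBitsPerPointer == 32 on MIPS): if current == expected, both subtractions are 0, the OR is 0, the arithmetic shift by 31 is 0, and the final nor gives all ones, so later ANDs leave values intact; if they differ, one of the two subtractions is negative, the OR has its sign bit set, the shift smears it across the word, and the nor yields 0, so every poisoned value collapses to 0.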
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index 3b57081c9e..91e68feb94 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -618,33 +618,53 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
first_unused_stack_slot);
}
+// Check that {kJavaScriptCallCodeStartRegister} is correct.
+void CodeGenerator::AssembleCodeStartRegisterCheck() {
+ __ ComputeCodeStartAddress(at);
+ __ Assert(eq, AbortReason::kWrongFunctionCodeStart,
+ kJavaScriptCallCodeStartRegister, Operand(at));
+}
+
// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
-// 1. load the address of the current instruction;
-// 2. read from memory the word that contains that bit, which can be found in
+// 1. read from memory the word that contains that bit, which can be found in
// the flags in the referenced {CodeDataContainer} object;
-// 3. test kMarkedForDeoptimizationBit in those flags; and
-// 4. if it is not zero then it jumps to the builtin.
+// 2. test kMarkedForDeoptimizationBit in those flags; and
+// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
- Label current;
- // This push on ra and the pop below together ensure that we restore the
- // register ra, which is needed while computing frames for deoptimization.
- __ push(ra);
- // The bal instruction puts the address of the current instruction into
- // the return address (ra) register, which we can use later on.
- __ bal(&current);
- __ nop();
- int pc = __ pc_offset();
- __ bind(&current);
- int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc);
- __ lw(a2, MemOperand(ra, offset));
- __ pop(ra);
- __ lw(a2, FieldMemOperand(a2, CodeDataContainer::kKindSpecificFlagsOffset));
- __ And(a2, a2, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+ __ lw(at, MemOperand(kJavaScriptCallCodeStartRegister, offset));
+ __ lw(at, FieldMemOperand(at, CodeDataContainer::kKindSpecificFlagsOffset));
+ __ And(at, at, Operand(1 << Code::kMarkedForDeoptimizationBit));
Handle<Code> code = isolate()->builtins()->builtin_handle(
Builtins::kCompileLazyDeoptimizedCode);
- __ Jump(code, RelocInfo::CODE_TARGET, ne, a2, Operand(zero_reg));
+ __ Jump(code, RelocInfo::CODE_TARGET, ne, at, Operand(zero_reg));
+}
+
+void CodeGenerator::GenerateSpeculationPoison() {
+ // Calculate a mask which has all bits set in the normal case, but has all
+ // bits cleared if we are speculatively executing the wrong PC.
+ // difference = (current - expected) | (expected - current)
+ // poison = ~(difference >> (kBitsPerPointer - 1))
+ __ ComputeCodeStartAddress(at);
+ __ Move(kSpeculationPoisonRegister, at);
+ __ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kJavaScriptCallCodeStartRegister);
+ __ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
+ at);
+ __ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kJavaScriptCallCodeStartRegister);
+ __ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kBitsPerPointer - 1);
+ __ nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kSpeculationPoisonRegister);
+}
+
+void CodeGenerator::AssembleRegisterArgumentPoisoning() {
+ __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
+ __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
+ __ And(sp, sp, kSpeculationPoisonRegister);
}
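The same computation as a stand-alone C++ sketch (helper name hypothetical, mirroring the instruction sequence above):

    #include <cstdint>

    // All-ones when the code start being executed matches the expected one,
    // all-zeros otherwise; branch-free, so it also holds under misspeculation.
    uint32_t SpeculationPoisonMask(uint32_t current, uint32_t expected) {
      uint32_t difference = (current - expected) | (expected - current);
      // Arithmetic shift replicates the sign bit across the whole word.
      int32_t smeared = static_cast<int32_t>(difference) >> 31;
      return ~static_cast<uint32_t>(smeared);
    }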
// Assembles an instruction after register allocation, producing machine code.
@@ -721,8 +741,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Assert(eq, AbortReason::kWrongFunctionContext, cp,
Operand(kScratchReg));
}
- __ lw(at, FieldMemOperand(func, JSFunction::kCodeOffset));
- __ Call(at, Code::kHeaderSize - kHeapObjectTag);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ lw(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
+ __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
+ __ Call(a2);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -1040,73 +1062,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMipsCtz: {
Register src = i.InputRegister(0);
Register dst = i.OutputRegister();
- if (IsMipsArchVariant(kMips32r6)) {
- // We don't have an instruction to count the number of trailing zeroes.
- // Start by flipping the bits end-for-end so we can count the number of
- // leading zeroes instead.
- __ Ror(dst, src, 16);
- __ wsbh(dst, dst);
- __ bitswap(dst, dst);
- __ Clz(dst, dst);
- } else {
- // Convert trailing zeroes to trailing ones, and bits to their left
- // to zeroes.
- __ Addu(kScratchReg, src, -1);
- __ Xor(dst, kScratchReg, src);
- __ And(dst, dst, kScratchReg);
- // Count number of leading zeroes.
- __ Clz(dst, dst);
- // Subtract number of leading zeroes from 32 to get number of trailing
- // ones. Remember that the trailing ones were formerly trailing zeroes.
- __ li(kScratchReg, 32);
- __ Subu(dst, kScratchReg, dst);
- }
+ __ Ctz(dst, src);
} break;
case kMipsPopcnt: {
- // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
- //
- // A generalization of the best bit counting method to integers of
- // bit-widths up to 128 (parameterized by type T) is this:
- //
- // v = v - ((v >> 1) & (T)~(T)0/3); // temp
- // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3); // temp
- // v = (v + (v >> 4)) & (T)~(T)0/255*15; // temp
- // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; //count
- //
- // For comparison, for 32-bit quantities, this algorithm can be executed
- // using 20 MIPS instructions (the calls to LoadConst32() generate two
- // machine instructions each for the values being used in this algorithm).
- // A(n unrolled) loop-based algorithm requires 25 instructions.
- //
- // For 64-bit quantities, this algorithm gets executed twice, (once
- // for in_lo, and again for in_hi), but saves a few instructions
- // because the mask values only have to be loaded once. Using this
- // algorithm the count for a 64-bit operand can be performed in 29
- // instructions compared to a loop-based algorithm which requires 47
- // instructions.
Register src = i.InputRegister(0);
Register dst = i.OutputRegister();
- uint32_t B0 = 0x55555555; // (T)~(T)0/3
- uint32_t B1 = 0x33333333; // (T)~(T)0/15*3
- uint32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15
- uint32_t value = 0x01010101; // (T)~(T)0/255
- uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
- __ srl(kScratchReg, src, 1);
- __ li(kScratchReg2, B0);
- __ And(kScratchReg, kScratchReg, kScratchReg2);
- __ Subu(kScratchReg, src, kScratchReg);
- __ li(kScratchReg2, B1);
- __ And(dst, kScratchReg, kScratchReg2);
- __ srl(kScratchReg, kScratchReg, 2);
- __ And(kScratchReg, kScratchReg, kScratchReg2);
- __ Addu(kScratchReg, dst, kScratchReg);
- __ srl(dst, kScratchReg, 4);
- __ Addu(dst, dst, kScratchReg);
- __ li(kScratchReg2, B2);
- __ And(dst, dst, kScratchReg2);
- __ li(kScratchReg, value);
- __ Mul(dst, dst, kScratchReg);
- __ srl(dst, dst, shift);
+ __ Popcnt(dst, src);
} break;
case kMipsShl:
if (instr->InputAt(1)->IsRegister()) {
@@ -1650,7 +1611,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kMipsPeek: {
- int reverse_slot = MiscField::decode(instr->opcode());
+ // The incoming value is 0-based, but we need a 1-based value.
+ int reverse_slot = i.InputInt32(0) + 1;
int offset =
FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
if (instr->OutputAt(0)->IsFPRegister()) {
@@ -1676,9 +1638,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
if (op->representation() == MachineRepresentation::kFloat64) {
__ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
- } else {
- DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+ } else if (op->representation() == MachineRepresentation::kFloat32) {
__ swc1(i.InputSingleRegister(0), MemOperand(sp, i.InputInt32(1)));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ st_b(i.InputSimd128Register(0), MemOperand(sp, i.InputInt32(1)));
}
} else {
__ sw(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
@@ -1689,74 +1654,74 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4);
break;
}
- case kAtomicLoadInt8:
+ case kWord32AtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lb);
break;
- case kAtomicLoadUint8:
+ case kWord32AtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lbu);
break;
- case kAtomicLoadInt16:
+ case kWord32AtomicLoadInt16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lh);
break;
- case kAtomicLoadUint16:
+ case kWord32AtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lhu);
break;
- case kAtomicLoadWord32:
+ case kWord32AtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lw);
break;
- case kAtomicStoreWord8:
+ case kWord32AtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(sb);
break;
- case kAtomicStoreWord16:
+ case kWord32AtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(sh);
break;
- case kAtomicStoreWord32:
+ case kWord32AtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(sw);
break;
- case kAtomicExchangeInt8:
+ case kWord32AtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 8);
break;
- case kAtomicExchangeUint8:
+ case kWord32AtomicExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 8);
break;
- case kAtomicExchangeInt16:
+ case kWord32AtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 16);
break;
- case kAtomicExchangeUint16:
+ case kWord32AtomicExchangeUint16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 16);
break;
- case kAtomicExchangeWord32:
+ case kWord32AtomicExchangeWord32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER();
break;
- case kAtomicCompareExchangeInt8:
+ case kWord32AtomicCompareExchangeInt8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 8);
break;
- case kAtomicCompareExchangeUint8:
+ case kWord32AtomicCompareExchangeUint8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 8);
break;
- case kAtomicCompareExchangeInt16:
+ case kWord32AtomicCompareExchangeInt16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 16);
break;
- case kAtomicCompareExchangeUint16:
+ case kWord32AtomicCompareExchangeUint16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 16);
break;
- case kAtomicCompareExchangeWord32:
+ case kWord32AtomicCompareExchangeWord32:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER();
break;
#define ATOMIC_BINOP_CASE(op, inst) \
- case kAtomic##op##Int8: \
+ case kWord32Atomic##op##Int8: \
ASSEMBLE_ATOMIC_BINOP_EXT(true, 8, inst); \
break; \
- case kAtomic##op##Uint8: \
+ case kWord32Atomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP_EXT(false, 8, inst); \
break; \
- case kAtomic##op##Int16: \
+ case kWord32Atomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP_EXT(true, 16, inst); \
break; \
- case kAtomic##op##Uint16: \
+ case kWord32Atomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP_EXT(false, 16, inst); \
break; \
- case kAtomic##op##Word32: \
+ case kWord32Atomic##op##Word32: \
ASSEMBLE_ATOMIC_BINOP(inst); \
break;
ATOMIC_BINOP_CASE(Add, Addu)
@@ -2979,6 +2944,11 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
branch->fallthru);
}
+void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
+ Instruction* instr) {
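+  // Branch poisoning is not used on this port; the MIPS instruction selector
+  // reports that speculation poisoning is unsupported.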
+ UNREACHABLE();
+}
+
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -3026,8 +2996,9 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
tasm()->isolate()),
0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
- CallDescriptor* descriptor = gen_->linkage()->GetIncomingDescriptor();
- int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
+ int pop_count =
+ static_cast<int>(call_descriptor->StackParameterCount());
__ Drop(pop_count);
__ Ret();
} else {
@@ -3253,9 +3224,8 @@ void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
Register input = i.InputRegister(0);
for (size_t index = 2; index < instr->InputCount(); index += 2) {
__ li(at, Operand(i.InputInt32(index + 0)));
- __ beq(input, at, GetLabel(i.InputRpo(index + 1)));
+ __ Branch(GetLabel(i.InputRpo(index + 1)), eq, input, Operand(at));
}
- __ nop(); // Branch delay slot of the last beq.
AssembleArchJump(i.InputRpo(1));
}
@@ -3271,9 +3241,9 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
void CodeGenerator::FinishFrame(Frame* frame) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
- const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
if (saves_fpu != 0) {
frame->AlignSavedCalleeRegisterSlots();
}
@@ -3285,7 +3255,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
(kDoubleSize / kPointerSize));
}
- const RegList saves = descriptor->CalleeSavedRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
int count = base::bits::CountPopulation(saves);
DCHECK_EQ(kNumCalleeSaved, count + 1);
@@ -3294,14 +3264,14 @@ void CodeGenerator::FinishFrame(Frame* frame) {
}
void CodeGenerator::AssembleConstructFrame() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
__ Push(ra, fp);
__ mov(fp, sp);
- } else if (descriptor->IsJSFunctionCall()) {
+ } else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
- if (descriptor->PushArgumentCount()) {
+ if (call_descriptor->PushArgumentCount()) {
__ Push(kJavaScriptCallArgCountRegister);
}
} else {
@@ -3309,8 +3279,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
- int shrink_slots =
- frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
+ int shrink_slots = frame()->GetTotalFrameSlotCount() -
+ call_descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -3325,8 +3295,8 @@ void CodeGenerator::AssembleConstructFrame() {
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
}
- const RegList saves = descriptor->CalleeSavedRegisters();
- const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
const int returns = frame()->GetReturnSlotCount();
// Skip callee-saved and return slots, which are pushed below.
@@ -3355,8 +3325,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+ int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
const int returns = frame()->GetReturnSlotCount();
if (returns != 0) {
@@ -3364,19 +3334,19 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
// Restore GP registers.
- const RegList saves = descriptor->CalleeSavedRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
__ MultiPop(saves);
}
// Restore FPU registers.
- const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
if (saves_fpu != 0) {
__ MultiPopFPU(saves_fpu);
}
MipsOperandConverter g(this, nullptr);
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
// Canonicalize JSFunction return sites for now unless they have a variable

@@ -3498,21 +3468,33 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
}
} else if (source->IsFPRegister()) {
- FPURegister src = g.ToDoubleRegister(source);
- if (destination->IsFPRegister()) {
- FPURegister dst = g.ToDoubleRegister(destination);
- __ Move(dst, src);
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kSimd128) {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ MSARegister src = g.ToSimd128Register(source);
+ if (destination->IsSimd128Register()) {
+ MSARegister dst = g.ToSimd128Register(destination);
+ __ move_v(dst, src);
+ } else {
+ DCHECK(destination->IsSimd128StackSlot());
+ __ st_b(src, g.ToMemOperand(destination));
+ }
} else {
- DCHECK(destination->IsFPStackSlot());
- MachineRepresentation rep =
- LocationOperand::cast(source)->representation();
- if (rep == MachineRepresentation::kFloat64) {
- __ Sdc1(src, g.ToMemOperand(destination));
- } else if (rep == MachineRepresentation::kFloat32) {
- __ swc1(src, g.ToMemOperand(destination));
+ FPURegister src = g.ToDoubleRegister(source);
+ if (destination->IsFPRegister()) {
+ FPURegister dst = g.ToDoubleRegister(destination);
+ __ Move(dst, src);
} else {
- DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- UNREACHABLE();
+ DCHECK(destination->IsFPStackSlot());
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kFloat64) {
+ __ Sdc1(src, g.ToMemOperand(destination));
+ } else if (rep == MachineRepresentation::kFloat32) {
+ __ swc1(src, g.ToMemOperand(destination));
+ } else {
+ UNREACHABLE();
+ }
}
}
} else if (source->IsFPStackSlot()) {
@@ -3526,7 +3508,8 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ lwc1(g.ToDoubleRegister(destination), src);
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- UNREACHABLE();
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ ld_b(g.ToSimd128Register(destination), src);
}
} else {
FPURegister temp = kScratchDoubleReg;
@@ -3538,7 +3521,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ swc1(temp, g.ToMemOperand(destination));
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- UNREACHABLE();
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ MSARegister temp = kSimd128ScratchReg;
+ __ ld_b(temp, src);
+ __ st_b(temp, g.ToMemOperand(destination));
}
}
} else {
@@ -3579,29 +3565,50 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ sw(temp_0, dst);
__ sw(temp_1, src);
} else if (source->IsFPRegister()) {
- FPURegister temp = kScratchDoubleReg;
- FPURegister src = g.ToDoubleRegister(source);
if (destination->IsFPRegister()) {
- FPURegister dst = g.ToDoubleRegister(destination);
- __ Move(temp, src);
- __ Move(src, dst);
- __ Move(dst, temp);
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kSimd128) {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ MSARegister temp = kSimd128ScratchReg;
+ MSARegister src = g.ToSimd128Register(source);
+ MSARegister dst = g.ToSimd128Register(destination);
+ __ move_v(temp, src);
+ __ move_v(src, dst);
+ __ move_v(dst, temp);
+ } else {
+ FPURegister temp = kScratchDoubleReg;
+ FPURegister src = g.ToDoubleRegister(source);
+ FPURegister dst = g.ToDoubleRegister(destination);
+ __ Move(temp, src);
+ __ Move(src, dst);
+ __ Move(dst, temp);
+ }
} else {
DCHECK(destination->IsFPStackSlot());
MemOperand dst = g.ToMemOperand(destination);
MachineRepresentation rep =
LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kFloat64) {
+ FPURegister temp = kScratchDoubleReg;
+ FPURegister src = g.ToDoubleRegister(source);
__ Move(temp, src);
__ Ldc1(src, dst);
__ Sdc1(temp, dst);
} else if (rep == MachineRepresentation::kFloat32) {
+ FPURegister temp = kScratchDoubleReg;
+ FPURegister src = g.ToFloatRegister(source);
__ Move(temp, src);
__ lwc1(src, dst);
__ swc1(temp, dst);
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- UNREACHABLE();
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ MSARegister temp = kSimd128ScratchReg;
+ MSARegister src = g.ToSimd128Register(source);
+ __ move_v(temp, src);
+ __ ld_b(src, dst);
+ __ st_b(temp, dst);
}
}
} else if (source->IsFPStackSlot()) {
@@ -3627,7 +3634,24 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ swc1(temp_1, src0);
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- UNREACHABLE();
+ MemOperand src1(src0.rm(), src0.offset() + kIntSize);
+ MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
+ MemOperand src2(src0.rm(), src0.offset() + 2 * kIntSize);
+ MemOperand dst2(dst0.rm(), dst0.offset() + 2 * kIntSize);
+ MemOperand src3(src0.rm(), src0.offset() + 3 * kIntSize);
+ MemOperand dst3(dst0.rm(), dst0.offset() + 3 * kIntSize);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ MSARegister temp_1 = kSimd128ScratchReg;
+ __ ld_b(temp_1, dst0); // Save destination in temp_1.
+ __ lw(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ sw(temp_0, dst0);
+ __ lw(temp_0, src1);
+ __ sw(temp_0, dst1);
+ __ lw(temp_0, src2);
+ __ sw(temp_0, dst2);
+ __ lw(temp_0, src3);
+ __ sw(temp_0, dst3);
+ __ st_b(temp_1, src0);
}
} else {
// No other combinations are possible.
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index 35b8a2396d..f0b8a0d588 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -267,7 +267,7 @@ void InstructionSelector::VisitLoad(Node* node) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- ArchOpcode opcode = kArchNop;
+ InstructionCode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kFloat32:
opcode = kMipsLwc1;
@@ -296,6 +296,10 @@ void InstructionSelector::VisitLoad(Node* node) {
UNREACHABLE();
return;
}
+ if (node->opcode() == IrOpcode::kPoisonedLoad) {
+ CHECK_EQ(load_poisoning_, LoadPoisoning::kDoPoison);
+ opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ }
if (g.CanBeImmediate(index, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI),
@@ -310,6 +314,8 @@ void InstructionSelector::VisitLoad(Node* node) {
}
}
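+// Poisoned loads share the regular load path; VisitLoad ORs in the
+// kMemoryAccessPoisoned flag when load poisoning is enabled.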
+void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -1157,14 +1163,14 @@ void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
}
void InstructionSelector::EmitPrepareArguments(
- ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
Node* node) {
MipsOperandGenerator g(this);
// Prepare for C function call.
- if (descriptor->IsCFunctionCall()) {
- Emit(kArchPrepareCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
+ if (call_descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
+ call_descriptor->ParameterCount())),
0, nullptr, 0, nullptr);
// Poke any stack arguments.
@@ -1178,7 +1184,7 @@ void InstructionSelector::EmitPrepareArguments(
}
} else {
// Possibly align stack here for functions.
- int push_count = static_cast<int>(descriptor->StackParameterCount());
+ int push_count = static_cast<int>(call_descriptor->StackParameterCount());
if (push_count > 0) {
// Calculate needed space
int stack_size = 0;
@@ -1201,30 +1207,26 @@ void InstructionSelector::EmitPrepareArguments(
}
}
-void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
- const CallDescriptor* descriptor,
- Node* node) {
+void InstructionSelector::EmitPrepareResults(
+ ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
+ Node* node) {
MipsOperandGenerator g(this);
int reverse_slot = 0;
for (PushParameter output : *results) {
if (!output.location.IsCallerFrameSlot()) continue;
- ++reverse_slot;
// Skip any alignment holes in nodes.
if (output.node != nullptr) {
- DCHECK(!descriptor->IsCFunctionCall());
+ DCHECK(!call_descriptor->IsCFunctionCall());
if (output.location.GetType() == MachineType::Float32()) {
MarkAsFloat32(output.node);
} else if (output.location.GetType() == MachineType::Float64()) {
MarkAsFloat64(output.node);
}
- InstructionOperand result = g.DefineAsRegister(output.node);
- Emit(kMipsPeek | MiscField::encode(reverse_slot), result);
- }
- if (output.location.GetType() == MachineType::Float64()) {
- // Float64 require an implicit second slot.
- ++reverse_slot;
+ Emit(kMipsPeek, g.DefineAsRegister(output.node),
+ g.UseImmediate(reverse_slot));
}
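+    // A Float64 result occupies two slots on MIPS32, so advance by the
+    // location's size in pointers rather than by one.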
+ reverse_slot += output.location.GetSizeInPointers();
}
}
@@ -1467,12 +1469,13 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
VisitWordCompare(selector, node, kMipsCmp, cont, false);
}
+} // namespace
+
// Shared routine for word comparisons against zero.
-void VisitWordCompareZero(InstructionSelector* selector, Node* user,
- Node* value, FlagsContinuation* cont) {
+void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
+ FlagsContinuation* cont) {
// Try to combine with comparisons against 0 by simply inverting the branch.
- while (value->opcode() == IrOpcode::kWord32Equal &&
- selector->CanCover(user, value)) {
+ while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
Int32BinopMatcher m(value);
if (!m.right().Is(0)) break;
@@ -1481,41 +1484,41 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
cont->Negate();
}
- if (selector->CanCover(user, value)) {
+ if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kWord32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kInt32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kUint32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kUint32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kFloat32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat64Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
@@ -1527,17 +1530,17 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (!result || selector->IsDefined(result)) {
+ if (!result || IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kMipsAddOvf, cont);
+ return VisitBinop(this, node, kMipsAddOvf, cont);
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kMipsSubOvf, cont);
+ return VisitBinop(this, node, kMipsSubOvf, cont);
case IrOpcode::kInt32MulWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kMipsMulOvf, cont);
+ return VisitBinop(this, node, kMipsMulOvf, cont);
default:
break;
}
@@ -1545,91 +1548,58 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
}
break;
case IrOpcode::kWord32And:
- return VisitWordCompare(selector, value, kMipsTst, cont, true);
+ return VisitWordCompare(this, value, kMipsTst, cont, true);
default:
break;
}
}
// Continuation could not be combined with a compare, emit compare against 0.
- MipsOperandGenerator g(selector);
+ MipsOperandGenerator g(this);
InstructionCode const opcode = cont->Encode(kMipsCmp);
InstructionOperand const value_operand = g.UseRegister(value);
if (cont->IsBranch()) {
- selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
- g.Label(cont->true_block()), g.Label(cont->false_block()));
+ Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
- g.TempImmediate(0), cont->kind(), cont->reason(),
- cont->feedback(), cont->frame_state());
+ EmitDeoptimize(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
- selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
- g.TempImmediate(0));
+ Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
+ g.TempImmediate(0));
} else {
DCHECK(cont->IsTrap());
- selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
- g.TempImmediate(cont->trap_id()));
+ Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
+ g.TempImmediate(cont->trap_id()));
}
}
-} // namespace
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
- BasicBlock* fbranch) {
- FlagsContinuation cont(kNotEqual, tbranch, fbranch);
- VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapUnless(Node* node,
- Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
MipsOperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
- static const size_t kMaxTableSwitchValueRange = 2 << 16;
- size_t table_space_cost = 9 + sw.value_range;
- size_t table_time_cost = 3;
- size_t lookup_space_cost = 2 + 2 * sw.case_count;
- size_t lookup_time_cost = sw.case_count;
- if (sw.case_count > 0 &&
- table_space_cost + 3 * table_time_cost <=
- lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min() &&
- sw.value_range <= kMaxTableSwitchValueRange) {
- InstructionOperand index_operand = value_operand;
- if (sw.min_value) {
- index_operand = g.TempRegister();
- Emit(kMipsSub, index_operand, value_operand,
- g.TempImmediate(sw.min_value));
+ if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
+ size_t table_space_cost = 9 + sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 2 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value) {
+ index_operand = g.TempRegister();
+ Emit(kMipsSub, index_operand, value_operand,
+ g.TempImmediate(sw.min_value));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
}
- // Generate a table lookup.
- return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
@@ -1641,7 +1611,7 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int32BinopMatcher m(node);
if (m.right().Is(0)) {
- return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
+ return VisitWordCompareZero(m.node(), m.left().node(), &cont);
}
VisitWordCompare(this, node, &cont);
}
@@ -1778,7 +1748,7 @@ void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
arraysize(temps), temps);
}
-void InstructionSelector::VisitAtomicLoad(Node* node) {
+void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
MipsOperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -1786,13 +1756,15 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
ArchOpcode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ opcode =
+ load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
+ : kWord32AtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicLoadWord32;
+ opcode = kWord32AtomicLoadWord32;
break;
default:
UNREACHABLE();
@@ -1812,7 +1784,7 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
}
}
-void InstructionSelector::VisitAtomicStore(Node* node) {
+void InstructionSelector::VisitWord32AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
MipsOperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -1821,13 +1793,13 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kAtomicStoreWord8;
+ opcode = kWord32AtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
- opcode = kAtomicStoreWord16;
+ opcode = kWord32AtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicStoreWord32;
+ opcode = kWord32AtomicStoreWord32;
break;
default:
UNREACHABLE();
@@ -1848,7 +1820,7 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
}
}
-void InstructionSelector::VisitAtomicExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
MipsOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -1856,15 +1828,15 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpRepresentationOf(node->op());
if (type == MachineType::Int8()) {
- opcode = kAtomicExchangeInt8;
+ opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicExchangeUint8;
+ opcode = kWord32AtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicExchangeInt16;
+ opcode = kWord32AtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicExchangeUint16;
+ opcode = kWord32AtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicExchangeWord32;
+ opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -1886,7 +1858,7 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
Emit(code, 1, outputs, input_count, inputs, 3, temp);
}
-void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
MipsOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -1895,15 +1867,15 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpRepresentationOf(node->op());
if (type == MachineType::Int8()) {
- opcode = kAtomicCompareExchangeInt8;
+ opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicCompareExchangeUint8;
+ opcode = kWord32AtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicCompareExchangeInt16;
+ opcode = kWord32AtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicCompareExchangeUint16;
+ opcode = kWord32AtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicCompareExchangeWord32;
+ opcode = kWord32AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -1967,11 +1939,12 @@ void InstructionSelector::VisitAtomicBinaryOperation(
Emit(code, 1, outputs, input_count, inputs, 4, temps);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitAtomic##op(Node* node) { \
- VisitAtomicBinaryOperation(node, kAtomic##op##Int8, kAtomic##op##Uint8, \
- kAtomic##op##Int16, kAtomic##op##Uint16, \
- kAtomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitAtomicBinaryOperation( \
+ node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
+ kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
+ kWord32Atomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -1996,11 +1969,6 @@ void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
V(I16x8) \
V(I8x16)
-#define SIMD_FORMAT_LIST(V) \
- V(32x4) \
- V(16x8) \
- V(8x16)
-
#define SIMD_UNOP_LIST(V) \
V(F32x4SConvertI32x4, kMipsF32x4SConvertI32x4) \
V(F32x4UConvertI32x4, kMipsF32x4UConvertI32x4) \
@@ -2263,6 +2231,16 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
g.UseImmediate(Pack4Lanes(shuffle + 12, mask)));
}
+void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsSeb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsSeh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
@@ -2302,11 +2280,13 @@ InstructionSelector::AlignmentRequirements() {
}
}
+// static
+bool InstructionSelector::SupportsSpeculationPoisoning() { return false; }
+
#undef SIMD_BINOP_LIST
#undef SIMD_SHIFT_OP_LIST
#undef SIMD_UNOP_LIST
#undef SIMD_TYPE_LIST
-#undef SIMD_FORMAT_LIST
#undef TRACE_UNIMPL
#undef TRACE
diff --git a/deps/v8/src/compiler/mips64/OWNERS b/deps/v8/src/compiler/mips64/OWNERS
index 3fce7dd688..4ce9d7f91d 100644
--- a/deps/v8/src/compiler/mips64/OWNERS
+++ b/deps/v8/src/compiler/mips64/OWNERS
@@ -1,2 +1,3 @@
ivica.bogosavljevic@mips.com
-Miran.Karic@mips.com
\ No newline at end of file
+Miran.Karic@mips.com
+sreten.kovacevic@mips.com
\ No newline at end of file
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index d4463008c8..ab84fe22b2 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -634,33 +634,53 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
first_unused_stack_slot);
}
+// Check that {kJavaScriptCallCodeStartRegister} is correct.
+void CodeGenerator::AssembleCodeStartRegisterCheck() {
+ __ ComputeCodeStartAddress(at);
+ __ Assert(eq, AbortReason::kWrongFunctionCodeStart,
+ kJavaScriptCallCodeStartRegister, Operand(at));
+}
+
// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
-// 1. load the address of the current instruction;
-// 2. read from memory the word that contains that bit, which can be found in
+// 1. read from memory the word that contains that bit, which can be found in
// the flags in the referenced {CodeDataContainer} object;
-// 3. test kMarkedForDeoptimizationBit in those flags; and
-// 4. if it is not zero then it jumps to the builtin.
+// 2. test kMarkedForDeoptimizationBit in those flags; and
+// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
- Label current;
- // This push on ra and the pop below together ensure that we restore the
- // register ra, which is needed while computing frames for deoptimization.
- __ push(ra);
- // The bal instruction puts the address of the current instruction into
- // the return address (ra) register, which we can use later on.
- __ bal(&current);
- __ nop();
- int pc = __ pc_offset();
- __ bind(&current);
- int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc);
- __ Ld(a2, MemOperand(ra, offset));
- __ pop(ra);
- __ Lw(a2, FieldMemOperand(a2, CodeDataContainer::kKindSpecificFlagsOffset));
- __ And(a2, a2, Operand(1 << Code::kMarkedForDeoptimizationBit));
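+  // The start of the code object is already available in
+  // kJavaScriptCallCodeStartRegister, so the bal/nop sequence that computed
+  // the current PC is no longer needed.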
+ int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+ __ Ld(at, MemOperand(kJavaScriptCallCodeStartRegister, offset));
+ __ Lw(at, FieldMemOperand(at, CodeDataContainer::kKindSpecificFlagsOffset));
+ __ And(at, at, Operand(1 << Code::kMarkedForDeoptimizationBit));
Handle<Code> code = isolate()->builtins()->builtin_handle(
Builtins::kCompileLazyDeoptimizedCode);
- __ Jump(code, RelocInfo::CODE_TARGET, ne, a2, Operand(zero_reg));
+ __ Jump(code, RelocInfo::CODE_TARGET, ne, at, Operand(zero_reg));
+}
+
+void CodeGenerator::GenerateSpeculationPoison() {
+ // Calculate a mask which has all bits set in the normal case, but has all
+ // bits cleared if we are speculatively executing the wrong PC.
+ // difference = (current - expected) | (expected - current)
+ // poison = ~(difference >> (kBitsPerPointer - 1))
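+  // When the code start matches, the difference is zero and the mask becomes
+  // all ones; otherwise the smeared sign bit is inverted, clearing the mask.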
+ __ ComputeCodeStartAddress(at);
+ __ Move(kSpeculationPoisonRegister, at);
+ __ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kJavaScriptCallCodeStartRegister);
+ __ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
+ at);
+ __ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kJavaScriptCallCodeStartRegister);
+ __ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kBitsPerPointer - 1);
+ __ nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kSpeculationPoisonRegister);
+}
+
+void CodeGenerator::AssembleRegisterArgumentPoisoning() {
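+  // Mask the incoming JSFunction, context and stack pointer with the
+  // speculation poison so they are zeroed when the code start was wrong.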
+ __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
+ __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
+ __ And(sp, sp, kSpeculationPoisonRegister);
}
// Assembles an instruction after register allocation, producing machine code.
@@ -746,9 +766,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Assert(eq, AbortReason::kWrongFunctionContext, cp,
Operand(kScratchReg));
}
- __ Ld(at, FieldMemOperand(func, JSFunction::kCodeOffset));
- __ Daddu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(at);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Ld(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
+ __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(a2);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -1153,124 +1174,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMips64Ctz: {
Register src = i.InputRegister(0);
Register dst = i.OutputRegister();
- if (kArchVariant == kMips64r6) {
- // We don't have an instruction to count the number of trailing zeroes.
- // Start by flipping the bits end-for-end so we can count the number of
- // leading zeroes instead.
- __ rotr(dst, src, 16);
- __ wsbh(dst, dst);
- __ bitswap(dst, dst);
- __ Clz(dst, dst);
- } else {
- // Convert trailing zeroes to trailing ones, and bits to their left
- // to zeroes.
- __ Daddu(kScratchReg, src, -1);
- __ Xor(dst, kScratchReg, src);
- __ And(dst, dst, kScratchReg);
- // Count number of leading zeroes.
- __ Clz(dst, dst);
- // Subtract number of leading zeroes from 32 to get number of trailing
- // ones. Remember that the trailing ones were formerly trailing zeroes.
- __ li(kScratchReg, 32);
- __ Subu(dst, kScratchReg, dst);
- }
+ __ Ctz(dst, src);
} break;
case kMips64Dctz: {
Register src = i.InputRegister(0);
Register dst = i.OutputRegister();
- if (kArchVariant == kMips64r6) {
- // We don't have an instruction to count the number of trailing zeroes.
- // Start by flipping the bits end-for-end so we can count the number of
- // leading zeroes instead.
- __ dsbh(dst, src);
- __ dshd(dst, dst);
- __ dbitswap(dst, dst);
- __ dclz(dst, dst);
- } else {
- // Convert trailing zeroes to trailing ones, and bits to their left
- // to zeroes.
- __ Daddu(kScratchReg, src, -1);
- __ Xor(dst, kScratchReg, src);
- __ And(dst, dst, kScratchReg);
- // Count number of leading zeroes.
- __ dclz(dst, dst);
- // Subtract number of leading zeroes from 64 to get number of trailing
- // ones. Remember that the trailing ones were formerly trailing zeroes.
- __ li(kScratchReg, 64);
- __ Dsubu(dst, kScratchReg, dst);
- }
+ __ Dctz(dst, src);
} break;
case kMips64Popcnt: {
- // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
- //
- // A generalization of the best bit counting method to integers of
- // bit-widths up to 128 (parameterized by type T) is this:
- //
- // v = v - ((v >> 1) & (T)~(T)0/3); // temp
- // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3); // temp
- // v = (v + (v >> 4)) & (T)~(T)0/255*15; // temp
- // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; //count
- //
- // For comparison, for 32-bit quantities, this algorithm can be executed
- // using 20 MIPS instructions (the calls to LoadConst32() generate two
- // machine instructions each for the values being used in this algorithm).
- // A(n unrolled) loop-based algorithm requires 25 instructions.
- //
- // For a 64-bit operand this can be performed in 24 instructions compared
- // to a(n unrolled) loop based algorithm which requires 38 instructions.
- //
- // There are algorithms which are faster in the cases where very few
- // bits are set but the algorithm here attempts to minimize the total
- // number of instructions executed even when a large number of bits
- // are set.
Register src = i.InputRegister(0);
Register dst = i.OutputRegister();
- uint32_t B0 = 0x55555555; // (T)~(T)0/3
- uint32_t B1 = 0x33333333; // (T)~(T)0/15*3
- uint32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15
- uint32_t value = 0x01010101; // (T)~(T)0/255
- uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
- __ srl(kScratchReg, src, 1);
- __ li(kScratchReg2, B0);
- __ And(kScratchReg, kScratchReg, kScratchReg2);
- __ Subu(kScratchReg, src, kScratchReg);
- __ li(kScratchReg2, B1);
- __ And(dst, kScratchReg, kScratchReg2);
- __ srl(kScratchReg, kScratchReg, 2);
- __ And(kScratchReg, kScratchReg, kScratchReg2);
- __ Addu(kScratchReg, dst, kScratchReg);
- __ srl(dst, kScratchReg, 4);
- __ Addu(dst, dst, kScratchReg);
- __ li(kScratchReg2, B2);
- __ And(dst, dst, kScratchReg2);
- __ li(kScratchReg, value);
- __ Mul(dst, dst, kScratchReg);
- __ srl(dst, dst, shift);
+ __ Popcnt(dst, src);
} break;
case kMips64Dpopcnt: {
Register src = i.InputRegister(0);
Register dst = i.OutputRegister();
- uint64_t B0 = 0x5555555555555555l; // (T)~(T)0/3
- uint64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3
- uint64_t B2 = 0x0F0F0F0F0F0F0F0Fl; // (T)~(T)0/255*15
- uint64_t value = 0x0101010101010101l; // (T)~(T)0/255
- uint64_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
- __ dsrl(kScratchReg, src, 1);
- __ li(kScratchReg2, B0);
- __ And(kScratchReg, kScratchReg, kScratchReg2);
- __ Dsubu(kScratchReg, src, kScratchReg);
- __ li(kScratchReg2, B1);
- __ And(dst, kScratchReg, kScratchReg2);
- __ dsrl(kScratchReg, kScratchReg, 2);
- __ And(kScratchReg, kScratchReg, kScratchReg2);
- __ Daddu(kScratchReg, dst, kScratchReg);
- __ dsrl(dst, kScratchReg, 4);
- __ Daddu(dst, dst, kScratchReg);
- __ li(kScratchReg2, B2);
- __ And(dst, dst, kScratchReg2);
- __ li(kScratchReg, value);
- __ Dmul(dst, dst, kScratchReg);
- __ dsrl32(dst, dst, shift);
+ __ Dpopcnt(dst, src);
} break;
case kMips64Shl:
if (instr->InputAt(1)->IsRegister()) {
@@ -1901,7 +1820,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMips64Peek: {
// The incoming value is 0-based, but we need a 1-based value.
- int reverse_slot = MiscField::decode(instr->opcode()) + 1;
+ int reverse_slot = i.InputInt32(0) + 1;
int offset =
FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
if (instr->OutputAt(0)->IsFPRegister()) {
@@ -1924,7 +1843,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMips64StoreToStackSlot: {
if (instr->InputAt(0)->IsFPRegister()) {
- __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
+ if (instr->InputAt(0)->IsSimd128Register()) {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ st_b(i.InputSimd128Register(0), MemOperand(sp, i.InputInt32(1)));
+ } else {
+ __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
+ }
} else {
__ Sd(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
}
@@ -1939,75 +1863,75 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ dsrl32(i.OutputRegister(0), i.OutputRegister(0), 0);
break;
}
- case kAtomicLoadInt8:
+ case kWord32AtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lb);
break;
- case kAtomicLoadUint8:
+ case kWord32AtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
break;
- case kAtomicLoadInt16:
+ case kWord32AtomicLoadInt16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lh);
break;
- case kAtomicLoadUint16:
+ case kWord32AtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
break;
- case kAtomicLoadWord32:
+ case kWord32AtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lw);
break;
- case kAtomicStoreWord8:
+ case kWord32AtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
break;
- case kAtomicStoreWord16:
+ case kWord32AtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
break;
- case kAtomicStoreWord32:
+ case kWord32AtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
break;
- case kAtomicExchangeInt8:
+ case kWord32AtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 8);
break;
- case kAtomicExchangeUint8:
+ case kWord32AtomicExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 8);
break;
- case kAtomicExchangeInt16:
+ case kWord32AtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 16);
break;
- case kAtomicExchangeUint16:
+ case kWord32AtomicExchangeUint16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 16);
break;
- case kAtomicExchangeWord32:
+ case kWord32AtomicExchangeWord32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER();
break;
- case kAtomicCompareExchangeInt8:
+ case kWord32AtomicCompareExchangeInt8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 8);
break;
- case kAtomicCompareExchangeUint8:
+ case kWord32AtomicCompareExchangeUint8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 8);
break;
- case kAtomicCompareExchangeInt16:
+ case kWord32AtomicCompareExchangeInt16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 16);
break;
- case kAtomicCompareExchangeUint16:
+ case kWord32AtomicCompareExchangeUint16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 16);
break;
- case kAtomicCompareExchangeWord32:
+ case kWord32AtomicCompareExchangeWord32:
__ sll(i.InputRegister(2), i.InputRegister(2), 0);
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER();
break;
#define ATOMIC_BINOP_CASE(op, inst) \
- case kAtomic##op##Int8: \
+ case kWord32Atomic##op##Int8: \
ASSEMBLE_ATOMIC_BINOP_EXT(true, 8, inst); \
break; \
- case kAtomic##op##Uint8: \
+ case kWord32Atomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP_EXT(false, 8, inst); \
break; \
- case kAtomic##op##Int16: \
+ case kWord32Atomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP_EXT(true, 16, inst); \
break; \
- case kAtomic##op##Uint16: \
+ case kWord32Atomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP_EXT(false, 16, inst); \
break; \
- case kAtomic##op##Word32: \
+ case kWord32Atomic##op##Word32: \
ASSEMBLE_ATOMIC_BINOP(inst); \
break;
ATOMIC_BINOP_CASE(Add, Addu)
@@ -3240,6 +3164,11 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
branch->fallthru);
}
+void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
+ Instruction* instr) {
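+  // Branch poisoning is not implemented for this port.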
+ UNREACHABLE();
+}
+
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -3285,8 +3214,9 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
tasm()->isolate()),
0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
- CallDescriptor* descriptor = gen_->linkage()->GetIncomingDescriptor();
- int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
+ int pop_count =
+ static_cast<int>(call_descriptor->StackParameterCount());
pop_count += (pop_count & 1); // align
__ Drop(pop_count);
__ Ret();
@@ -3523,9 +3453,8 @@ void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
Register input = i.InputRegister(0);
for (size_t index = 2; index < instr->InputCount(); index += 2) {
__ li(at, Operand(i.InputInt32(index + 0)));
- __ beq(input, at, GetLabel(i.InputRpo(index + 1)));
+ __ Branch(GetLabel(i.InputRpo(index + 1)), eq, input, Operand(at));
}
- __ nop(); // Branch delay slot of the last beq.
AssembleArchJump(i.InputRpo(1));
}
@@ -3541,9 +3470,9 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
void CodeGenerator::FinishFrame(Frame* frame) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
- const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
if (saves_fpu != 0) {
int count = base::bits::CountPopulation(saves_fpu);
DCHECK_EQ(kNumCalleeSavedFPU, count);
@@ -3551,7 +3480,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
(kDoubleSize / kPointerSize));
}
- const RegList saves = descriptor->CalleeSavedRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
int count = base::bits::CountPopulation(saves);
DCHECK_EQ(kNumCalleeSaved, count + 1);
@@ -3560,15 +3489,15 @@ void CodeGenerator::FinishFrame(Frame* frame) {
}
void CodeGenerator::AssembleConstructFrame() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
__ Push(ra, fp);
__ mov(fp, sp);
- } else if (descriptor->IsJSFunctionCall()) {
+ } else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
- if (descriptor->PushArgumentCount()) {
+ if (call_descriptor->PushArgumentCount()) {
__ Push(kJavaScriptCallArgCountRegister);
}
} else {
@@ -3576,8 +3505,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
- int shrink_slots =
- frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
+ int shrink_slots = frame()->GetTotalFrameSlotCount() -
+ call_descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -3592,8 +3521,8 @@ void CodeGenerator::AssembleConstructFrame() {
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
}
- const RegList saves = descriptor->CalleeSavedRegisters();
- const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
const int returns = frame()->GetReturnSlotCount();
// Skip callee-saved and return slots, which are pushed below.
@@ -3623,7 +3552,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
const int returns = frame()->GetReturnSlotCount();
if (returns != 0) {
@@ -3631,19 +3560,19 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
// Restore GP registers.
- const RegList saves = descriptor->CalleeSavedRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
__ MultiPop(saves);
}
// Restore FPU registers.
- const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
if (saves_fpu != 0) {
__ MultiPopFPU(saves_fpu);
}
MipsOperandConverter g(this, nullptr);
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
// Canonicalize JSFunction return sites for now unless they have a variable
@@ -3660,7 +3589,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
AssembleDeconstructFrame();
}
}
- int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
if (pop->IsImmediate()) {
pop_count += g.ToConstant(pop).ToInt32();
} else {
@@ -3770,23 +3699,50 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
}
} else if (source->IsFPRegister()) {
- FPURegister src = g.ToDoubleRegister(source);
- if (destination->IsFPRegister()) {
- FPURegister dst = g.ToDoubleRegister(destination);
- __ Move(dst, src);
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kSimd128) {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ MSARegister src = g.ToSimd128Register(source);
+ if (destination->IsSimd128Register()) {
+ MSARegister dst = g.ToSimd128Register(destination);
+ __ move_v(dst, src);
+ } else {
+ DCHECK(destination->IsSimd128StackSlot());
+ __ st_b(src, g.ToMemOperand(destination));
+ }
} else {
- DCHECK(destination->IsFPStackSlot());
- __ Sdc1(src, g.ToMemOperand(destination));
+ FPURegister src = g.ToDoubleRegister(source);
+ if (destination->IsFPRegister()) {
+ FPURegister dst = g.ToDoubleRegister(destination);
+ __ Move(dst, src);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ __ Sdc1(src, g.ToMemOperand(destination));
+ }
}
} else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
MemOperand src = g.ToMemOperand(source);
- if (destination->IsFPRegister()) {
- __ Ldc1(g.ToDoubleRegister(destination), src);
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kSimd128) {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ if (destination->IsSimd128Register()) {
+ __ ld_b(g.ToSimd128Register(destination), src);
+ } else {
+ DCHECK(destination->IsSimd128StackSlot());
+ MSARegister temp = kSimd128ScratchReg;
+ __ ld_b(temp, src);
+ __ st_b(temp, g.ToMemOperand(destination));
+ }
} else {
- FPURegister temp = kScratchDoubleReg;
- __ Ldc1(temp, src);
- __ Sdc1(temp, g.ToMemOperand(destination));
+ if (destination->IsFPRegister()) {
+ __ Ldc1(g.ToDoubleRegister(destination), src);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ FPURegister temp = kScratchDoubleReg;
+ __ Ldc1(temp, src);
+ __ Sdc1(temp, g.ToMemOperand(destination));
+ }
}
} else {
UNREACHABLE();
@@ -3826,34 +3782,73 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ Sd(temp_0, dst);
__ Sd(temp_1, src);
} else if (source->IsFPRegister()) {
- FPURegister temp = kScratchDoubleReg;
- FPURegister src = g.ToDoubleRegister(source);
- if (destination->IsFPRegister()) {
- FPURegister dst = g.ToDoubleRegister(destination);
- __ Move(temp, src);
- __ Move(src, dst);
- __ Move(dst, temp);
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kSimd128) {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ MSARegister temp = kSimd128ScratchReg;
+ MSARegister src = g.ToSimd128Register(source);
+ if (destination->IsSimd128Register()) {
+ MSARegister dst = g.ToSimd128Register(destination);
+ __ move_v(temp, src);
+ __ move_v(src, dst);
+ __ move_v(dst, temp);
+ } else {
+ DCHECK(destination->IsSimd128StackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ move_v(temp, src);
+ __ ld_b(src, dst);
+ __ st_b(temp, dst);
+ }
} else {
- DCHECK(destination->IsFPStackSlot());
- MemOperand dst = g.ToMemOperand(destination);
- __ Move(temp, src);
- __ Ldc1(src, dst);
- __ Sdc1(temp, dst);
+ FPURegister temp = kScratchDoubleReg;
+ FPURegister src = g.ToDoubleRegister(source);
+ if (destination->IsFPRegister()) {
+ FPURegister dst = g.ToDoubleRegister(destination);
+ __ Move(temp, src);
+ __ Move(src, dst);
+ __ Move(dst, temp);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ Move(temp, src);
+ __ Ldc1(src, dst);
+ __ Sdc1(temp, dst);
+ }
}
} else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPStackSlot());
Register temp_0 = kScratchReg;
- FPURegister temp_1 = kScratchDoubleReg;
MemOperand src0 = g.ToMemOperand(source);
MemOperand src1(src0.rm(), src0.offset() + kIntSize);
MemOperand dst0 = g.ToMemOperand(destination);
MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
- __ Ldc1(temp_1, dst0); // Save destination in temp_1.
- __ Lw(temp_0, src0); // Then use temp_0 to copy source to destination.
- __ Sw(temp_0, dst0);
- __ Lw(temp_0, src1);
- __ Sw(temp_0, dst1);
- __ Sdc1(temp_1, src0);
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kSimd128) {
+ MemOperand src2(src0.rm(), src0.offset() + 2 * kIntSize);
+ MemOperand src3(src0.rm(), src0.offset() + 3 * kIntSize);
+ MemOperand dst2(dst0.rm(), dst0.offset() + 2 * kIntSize);
+ MemOperand dst3(dst0.rm(), dst0.offset() + 3 * kIntSize);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ MSARegister temp_1 = kSimd128ScratchReg;
+ __ ld_b(temp_1, dst0); // Save destination in temp_1.
+ __ Lw(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ Sw(temp_0, dst0);
+ __ Lw(temp_0, src1);
+ __ Sw(temp_0, dst1);
+ __ Lw(temp_0, src2);
+ __ Sw(temp_0, dst2);
+ __ Lw(temp_0, src3);
+ __ Sw(temp_0, dst3);
+ __ st_b(temp_1, src0);
+ } else {
+ FPURegister temp_1 = kScratchDoubleReg;
+ __ Ldc1(temp_1, dst0); // Save destination in temp_1.
+ __ Lw(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ Sw(temp_0, dst0);
+ __ Lw(temp_0, src1);
+ __ Sw(temp_0, dst1);
+ __ Sdc1(temp_1, src0);
+ }
} else {
// No other combinations are possible.
UNREACHABLE();
@@ -3866,7 +3861,6 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
UNREACHABLE();
}
-
#undef __
} // namespace compiler
diff --git a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
index 3058812bec..a50d294013 100644
--- a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_MIPS_INSTRUCTION_CODES_MIPS_H_
-#define V8_COMPILER_MIPS_INSTRUCTION_CODES_MIPS_H_
+#ifndef V8_COMPILER_MIPS64_INSTRUCTION_CODES_MIPS64_H_
+#define V8_COMPILER_MIPS64_INSTRUCTION_CODES_MIPS64_H_
namespace v8 {
namespace internal {
@@ -331,4 +331,4 @@ namespace compiler {
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_MIPS_INSTRUCTION_CODES_MIPS_H_
+#endif // V8_COMPILER_MIPS64_INSTRUCTION_CODES_MIPS64_H_
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index 38f077c4e6..f7c8cab67b 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -379,7 +379,7 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ InstructionCode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kFloat32:
opcode = kMips64Lwc1;
@@ -410,10 +410,16 @@ void InstructionSelector::VisitLoad(Node* node) {
UNREACHABLE();
return;
}
+ if (node->opcode() == IrOpcode::kPoisonedLoad) {
+ CHECK_EQ(load_poisoning_, LoadPoisoning::kDoPoison);
+ opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ }
EmitLoad(this, node, opcode);
}
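+// Poisoned loads share the regular load path; VisitLoad tags the opcode with
+// kMemoryAccessPoisoned when load poisoning is enabled.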
+void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -1382,6 +1388,7 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
break;
}
}
+ break;
}
default:
break;
@@ -1650,14 +1657,14 @@ void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
}
void InstructionSelector::EmitPrepareArguments(
- ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
Node* node) {
Mips64OperandGenerator g(this);
// Prepare for C function call.
- if (descriptor->IsCFunctionCall()) {
- Emit(kArchPrepareCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
+ if (call_descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
+ call_descriptor->ParameterCount())),
0, nullptr, 0, nullptr);
// Poke any stack arguments.
@@ -1668,10 +1675,17 @@ void InstructionSelector::EmitPrepareArguments(
++slot;
}
} else {
- int push_count = static_cast<int>(descriptor->StackParameterCount());
+ int push_count = static_cast<int>(call_descriptor->StackParameterCount());
if (push_count > 0) {
+ // Calculate needed space
+ int stack_size = 0;
+ for (PushParameter input : (*arguments)) {
+ if (input.node) {
+ stack_size += input.location.GetSizeInPointers();
+ }
+ }
Emit(kMips64StackClaim, g.NoOutput(),
- g.TempImmediate(push_count << kPointerSizeLog2));
+ g.TempImmediate(stack_size << kPointerSizeLog2));
}
for (size_t n = 0; n < arguments->size(); ++n) {
PushParameter input = (*arguments)[n];
@@ -1683,9 +1697,9 @@ void InstructionSelector::EmitPrepareArguments(
}
}
-void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
- const CallDescriptor* descriptor,
- Node* node) {
+void InstructionSelector::EmitPrepareResults(
+ ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
+ Node* node) {
Mips64OperandGenerator g(this);
int reverse_slot = 0;
@@ -1693,14 +1707,14 @@ void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
if (!output.location.IsCallerFrameSlot()) continue;
// Skip any alignment holes in nodes.
if (output.node != nullptr) {
- DCHECK(!descriptor->IsCFunctionCall());
+ DCHECK(!call_descriptor->IsCFunctionCall());
if (output.location.GetType() == MachineType::Float32()) {
MarkAsFloat32(output.node);
} else if (output.location.GetType() == MachineType::Float64()) {
MarkAsFloat64(output.node);
}
- InstructionOperand result = g.DefineAsRegister(output.node);
- Emit(kMips64Peek | MiscField::encode(reverse_slot), result);
+ Emit(kMips64Peek, g.DefineAsRegister(output.node),
+ g.UseImmediate(reverse_slot));
}
reverse_slot += output.location.GetSizeInPointers();
}
@@ -2057,12 +2071,13 @@ void EmitWordCompareZero(InstructionSelector* selector, Node* value,
}
}
+} // namespace
// Shared routine for word comparisons against zero.
-void VisitWordCompareZero(InstructionSelector* selector, Node* user,
- Node* value, FlagsContinuation* cont) {
+void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
+ FlagsContinuation* cont) {
// Try to combine with comparisons against 0 by simply inverting the branch.
- while (selector->CanCover(user, value)) {
+ while (CanCover(user, value)) {
if (value->opcode() == IrOpcode::kWord32Equal) {
Int32BinopMatcher m(value);
if (!m.right().Is(0)) break;
@@ -2080,56 +2095,56 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
cont->Negate();
}
- if (selector->CanCover(user, value)) {
+ if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kWord32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kInt32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kUint32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kUint32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kWord64Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kInt64LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kInt64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kUint64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kUint64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kFloat32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat64Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
@@ -2141,23 +2156,23 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (result == nullptr || selector->IsDefined(result)) {
+ if (result == nullptr || IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kMips64Dadd, cont);
+ return VisitBinop(this, node, kMips64Dadd, cont);
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kMips64Dsub, cont);
+ return VisitBinop(this, node, kMips64Dsub, cont);
case IrOpcode::kInt32MulWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kMips64MulOvf, cont);
+ return VisitBinop(this, node, kMips64MulOvf, cont);
case IrOpcode::kInt64AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kMips64DaddOvf, cont);
+ return VisitBinop(this, node, kMips64DaddOvf, cont);
case IrOpcode::kInt64SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kMips64DsubOvf, cont);
+ return VisitBinop(this, node, kMips64DsubOvf, cont);
default:
break;
}
@@ -2166,49 +2181,14 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
break;
case IrOpcode::kWord32And:
case IrOpcode::kWord64And:
- return VisitWordCompare(selector, value, kMips64Tst, cont, true);
+ return VisitWordCompare(this, value, kMips64Tst, cont, true);
default:
break;
}
}
// Continuation could not be combined with a compare, emit compare against 0.
- EmitWordCompareZero(selector, value, cont);
-}
-
-} // namespace
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
- BasicBlock* fbranch) {
- FlagsContinuation cont(kNotEqual, tbranch, fbranch);
- VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapUnless(Node* node,
- Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+ EmitWordCompareZero(this, value, cont);
}
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
@@ -2216,24 +2196,26 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
- static const size_t kMaxTableSwitchValueRange = 2 << 16;
- size_t table_space_cost = 10 + 2 * sw.value_range;
- size_t table_time_cost = 3;
- size_t lookup_space_cost = 2 + 2 * sw.case_count;
- size_t lookup_time_cost = sw.case_count;
- if (sw.case_count > 0 &&
- table_space_cost + 3 * table_time_cost <=
- lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min() &&
- sw.value_range <= kMaxTableSwitchValueRange) {
- InstructionOperand index_operand = value_operand;
- if (sw.min_value) {
- index_operand = g.TempRegister();
- Emit(kMips64Sub, index_operand, value_operand,
- g.TempImmediate(sw.min_value));
+ if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
+ size_t table_space_cost = 10 + 2 * sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 2 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value) {
+ index_operand = g.TempRegister();
+ Emit(kMips64Sub, index_operand, value_operand,
+ g.TempImmediate(sw.min_value));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
}
- // Generate a table lookup.
- return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
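The hunk above only wraps the existing jump-table heuristic in the new enable_switch_jump_table_ check; the heuristic itself weighs a table switch against a chain of conditional jumps using the space and time costs shown. A minimal sketch of that comparison, with the constants copied from the hunk (PreferTableSwitch is a name introduced here for illustration only):

    #include <cstddef>
    #include <cstdint>
    #include <limits>

    // Returns true when emitting an ArchTableSwitch is expected to be cheaper
    // than a sequence of compares and conditional jumps (ArchLookupSwitch).
    bool PreferTableSwitch(size_t case_count, size_t value_range,
                           int32_t min_value) {
      static const size_t kMaxTableSwitchValueRange = 2 << 16;
      size_t table_space_cost = 10 + 2 * value_range;  // one entry per value
      size_t table_time_cost = 3;                       // bounds check + indexed jump
      size_t lookup_space_cost = 2 + 2 * case_count;    // compare + branch per case
      size_t lookup_time_cost = case_count;             // linear scan of cases
      return case_count > 0 &&
             table_space_cost + 3 * table_time_cost <=
                 lookup_space_cost + 3 * lookup_time_cost &&
             min_value > std::numeric_limits<int32_t>::min() &&
             value_range <= kMaxTableSwitchValueRange;
    }

For example, 4 cases spread over a value range of 100 score 219 for the table against 22 for the lookup chain, so the conditional jumps win; 100 dense cases over the same range score 219 against 502, so the table wins.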
@@ -2245,7 +2227,7 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int32BinopMatcher m(node);
if (m.right().Is(0)) {
- return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
+ return VisitWordCompareZero(m.node(), m.left().node(), &cont);
}
VisitWord32Compare(this, node, &cont);
@@ -2330,7 +2312,7 @@ void InstructionSelector::VisitWord64Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int64BinopMatcher m(node);
if (m.right().Is(0)) {
- return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
+ return VisitWordCompareZero(m.node(), m.left().node(), &cont);
}
VisitWord64Compare(this, node, &cont);
@@ -2431,7 +2413,7 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(left), g.UseRegister(right));
}
-void InstructionSelector::VisitAtomicLoad(Node* node) {
+void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
Mips64OperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -2439,13 +2421,15 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
ArchOpcode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ opcode =
+ load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
+ : kWord32AtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicLoadWord32;
+ opcode = kWord32AtomicLoadWord32;
break;
default:
UNREACHABLE();
@@ -2464,7 +2448,7 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
}
}
-void InstructionSelector::VisitAtomicStore(Node* node) {
+void InstructionSelector::VisitWord32AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
Mips64OperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -2473,13 +2457,13 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kAtomicStoreWord8;
+ opcode = kWord32AtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
- opcode = kAtomicStoreWord16;
+ opcode = kWord32AtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicStoreWord32;
+ opcode = kWord32AtomicStoreWord32;
break;
default:
UNREACHABLE();
@@ -2500,7 +2484,7 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
}
}
-void InstructionSelector::VisitAtomicExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
Mips64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2508,15 +2492,15 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpRepresentationOf(node->op());
if (type == MachineType::Int8()) {
- opcode = kAtomicExchangeInt8;
+ opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicExchangeUint8;
+ opcode = kWord32AtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicExchangeInt16;
+ opcode = kWord32AtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicExchangeUint16;
+ opcode = kWord32AtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicExchangeWord32;
+ opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -2538,7 +2522,7 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
Emit(code, 1, outputs, input_count, inputs, 3, temp);
}
-void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
Mips64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2547,15 +2531,15 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpRepresentationOf(node->op());
if (type == MachineType::Int8()) {
- opcode = kAtomicCompareExchangeInt8;
+ opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicCompareExchangeUint8;
+ opcode = kWord32AtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicCompareExchangeInt16;
+ opcode = kWord32AtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicCompareExchangeUint16;
+ opcode = kWord32AtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicCompareExchangeWord32;
+ opcode = kWord32AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -2619,11 +2603,12 @@ void InstructionSelector::VisitAtomicBinaryOperation(
Emit(code, 1, outputs, input_count, inputs, 4, temps);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitAtomic##op(Node* node) { \
- VisitAtomicBinaryOperation(node, kAtomic##op##Int8, kAtomic##op##Uint8, \
- kAtomic##op##Int16, kAtomic##op##Uint16, \
- kAtomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitAtomicBinaryOperation( \
+ node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
+ kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
+ kWord32Atomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2648,12 +2633,6 @@ void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
V(I16x8) \
V(I8x16)
-// TODO(mostynb@opera.com): this is never used, remove it?
-#define SIMD_FORMAT_LIST(V) \
- V(32x4) \
- V(16x8) \
- V(8x16)
-
#define SIMD_UNOP_LIST(V) \
V(F32x4SConvertI32x4, kMips64F32x4SConvertI32x4) \
V(F32x4UConvertI32x4, kMips64F32x4UConvertI32x4) \
@@ -2921,6 +2900,32 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
g.UseImmediate(Pack4Lanes(shuffle + 12, mask)));
}
+void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Seb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Seh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Seb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Seh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(0));
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
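A note on the sign-extension visitors added above: the 8- and 16-bit cases map to the dedicated kMips64Seb/kMips64Seh instructions, while SignExtendWord32ToInt64 is emitted as kMips64Shl with a zero immediate. On MIPS64 the 32-bit shift instruction sign-extends its 32-bit result into the full 64-bit register, so a shift by zero acts as a cheap 32-to-64 sign extension. In C++ terms the operation being selected is simply a narrowing cast followed by widening (helper name chosen here for illustration):

    #include <cstdint>

    // Semantics of "sll rd, rs, 0" on MIPS64: keep the low 32 bits and
    // replicate their sign bit into the upper 32 bits of the destination.
    int64_t SignExtendWord32(int64_t value) {
      return static_cast<int64_t>(static_cast<int32_t>(value));
    }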
@@ -2957,10 +2962,12 @@ InstructionSelector::AlignmentRequirements() {
}
}
+// static
+bool InstructionSelector::SupportsSpeculationPoisoning() { return false; }
+
#undef SIMD_BINOP_LIST
#undef SIMD_SHIFT_OP_LIST
#undef SIMD_UNOP_LIST
-#undef SIMD_FORMAT_LIST
#undef SIMD_TYPE_LIST
#undef TRACE_UNIMPL
#undef TRACE
diff --git a/deps/v8/src/compiler/move-optimizer.h b/deps/v8/src/compiler/move-optimizer.h
index 7c132ab153..c78da1e517 100644
--- a/deps/v8/src/compiler/move-optimizer.h
+++ b/deps/v8/src/compiler/move-optimizer.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_MOVE_OPTIMIZER_
-#define V8_COMPILER_MOVE_OPTIMIZER_
+#ifndef V8_COMPILER_MOVE_OPTIMIZER_H_
+#define V8_COMPILER_MOVE_OPTIMIZER_H_
#include "src/compiler/instruction.h"
#include "src/globals.h"
@@ -65,4 +65,4 @@ class V8_EXPORT_PRIVATE MoveOptimizer final {
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_MOVE_OPTIMIZER_
+#endif // V8_COMPILER_MOVE_OPTIMIZER_H_
diff --git a/deps/v8/src/compiler/node-aux-data.h b/deps/v8/src/compiler/node-aux-data.h
index 277ff18034..c395475109 100644
--- a/deps/v8/src/compiler/node-aux-data.h
+++ b/deps/v8/src/compiler/node-aux-data.h
@@ -15,15 +15,27 @@ namespace compiler {
// Forward declarations.
class Node;
-template <class T, T def()>
+template <class T>
+T DefaultConstruct() {
+ return T();
+}
+
+template <class T, T def() = DefaultConstruct<T>>
class NodeAuxData {
public:
explicit NodeAuxData(Zone* zone) : aux_data_(zone) {}
+ explicit NodeAuxData(size_t initial_size, Zone* zone)
+ : aux_data_(initial_size, zone) {}
- void Set(Node* node, T const& data) {
+ // Update entry. Returns true iff entry was changed.
+ bool Set(Node* node, T const& data) {
size_t const id = node->id();
if (id >= aux_data_.size()) aux_data_.resize(id + 1, def());
- aux_data_[id] = data;
+ if (aux_data_[id] != data) {
+ aux_data_[id] = data;
+ return true;
+ }
+ return false;
}
T Get(Node* node) const {
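The node-aux-data.h hunk above makes two changes: the def() template parameter now defaults to DefaultConstruct<T>, and Set reports whether the stored value actually changed. Returning a bool from an update is the usual building block for fixpoint-style passes that only revisit a node when its data moved; the callers are not shown in this hunk, so the sketch below only illustrates the pattern with a self-contained container rather than V8's class.

    #include <cstddef>
    #include <vector>

    // Self-contained illustration of "Set returns true iff the entry changed".
    template <class T>
    class DenseAuxData {
     public:
      bool Set(size_t id, const T& data) {
        if (id >= data_.size()) data_.resize(id + 1, T());
        if (data_[id] == data) return false;  // unchanged, caller can skip requeueing
        data_[id] = data;
        return true;
      }
      T Get(size_t id) const { return id < data_.size() ? data_[id] : T(); }

     private:
      std::vector<T> data_;
    };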
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index 22004337eb..5fe6e5d420 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -516,6 +516,27 @@ MaybeHandle<Map> NodeProperties::GetMapWitness(Node* node) {
}
// static
+bool NodeProperties::HasInstanceTypeWitness(Node* receiver, Node* effect,
+ InstanceType instance_type) {
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ switch (result) {
+ case NodeProperties::kUnreliableReceiverMaps:
+ case NodeProperties::kReliableReceiverMaps:
+ DCHECK_NE(0, receiver_maps.size());
+ for (size_t i = 0; i < receiver_maps.size(); ++i) {
+ if (receiver_maps[i]->instance_type() != instance_type) return false;
+ }
+ return true;
+
+ case NodeProperties::kNoReceiverMaps:
+ return false;
+ }
+ UNREACHABLE();
+}
+
+// static
bool NodeProperties::NoObservableSideEffectBetween(Node* effect,
Node* dominator) {
while (effect != dominator) {
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index abc6622c83..7388bf94dd 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -155,6 +155,8 @@ class V8_EXPORT_PRIVATE NodeProperties final {
Node* receiver, Node* effect, ZoneHandleSet<Map>* maps_return);
static MaybeHandle<Map> GetMapWitness(Node* node);
+ static bool HasInstanceTypeWitness(Node* receiver, Node* effect,
+ InstanceType instance_type);
// Walks up the {effect} chain to check that there's no observable side-effect
// between the {effect} and it's {dominator}. Aborts the walk if there's join
diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc
index ededcc4806..f988b954fb 100644
--- a/deps/v8/src/compiler/node.cc
+++ b/deps/v8/src/compiler/node.cc
@@ -60,8 +60,8 @@ Node* Node::New(Zone* zone, NodeId id, const Operator* op, int input_count,
// Verify that none of the inputs are {nullptr}.
for (int i = 0; i < input_count; i++) {
if (inputs[i] == nullptr) {
- V8_Fatal(__FILE__, __LINE__, "Node::New() Error: #%d:%s[%d] is nullptr",
- static_cast<int>(id), op->mnemonic(), i);
+ FATAL("Node::New() Error: #%d:%s[%d] is nullptr", static_cast<int>(id),
+ op->mnemonic(), i);
}
}
#endif
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index 8a4685114b..26fc03fb13 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -65,12 +65,12 @@ class V8_EXPORT_PRIVATE Node final {
#ifdef DEBUG
void Verify();
-#define BOUNDS_CHECK(index) \
- do { \
- if (index < 0 || index >= InputCount()) { \
- V8_Fatal(__FILE__, __LINE__, "Node #%d:%s->InputAt(%d) out of bounds", \
- id(), op()->mnemonic(), index); \
- } \
+#define BOUNDS_CHECK(index) \
+ do { \
+ if (index < 0 || index >= InputCount()) { \
+ FATAL("Node #%d:%s->InputAt(%d) out of bounds", id(), op()->mnemonic(), \
+ index); \
+ } \
} while (false)
#else
// No bounds checks or verification in release mode.
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index ec6c720af2..9a8f1e1df8 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -140,7 +140,9 @@
V(JSCreateClosure) \
V(JSCreateGeneratorObject) \
V(JSCreateIterResultObject) \
+ V(JSCreateStringIterator) \
V(JSCreateKeyValueArray) \
+ V(JSCreatePromise) \
V(JSCreateLiteralArray) \
V(JSCreateEmptyLiteralArray) \
V(JSCreateLiteralObject) \
@@ -191,8 +193,14 @@
V(JSStoreModule) \
V(JSGeneratorStore) \
V(JSGeneratorRestoreContinuation) \
+ V(JSGeneratorRestoreContext) \
V(JSGeneratorRestoreRegister) \
V(JSGeneratorRestoreInputOrDebugPos) \
+ V(JSFulfillPromise) \
+ V(JSPerformPromiseThen) \
+ V(JSPromiseResolve) \
+ V(JSRejectPromise) \
+ V(JSResolvePromise) \
V(JSStackCheck) \
V(JSDebugger)
@@ -342,6 +350,7 @@
V(StringLength) \
V(StringToLowerCaseIntl) \
V(StringToUpperCaseIntl) \
+ V(StringSubstring) \
V(CheckBounds) \
V(CheckIf) \
V(CheckMaps) \
@@ -361,7 +370,6 @@
V(ConvertReceiver) \
V(ConvertTaggedHoleToUndefined) \
V(TypeOf) \
- V(ClassOf) \
V(Allocate) \
V(AllocateRaw) \
V(LoadFieldByIndex) \
@@ -538,88 +546,95 @@
V(Float64Mod) \
V(Float64Pow)
-#define MACHINE_OP_LIST(V) \
- MACHINE_UNOP_32_LIST(V) \
- MACHINE_BINOP_32_LIST(V) \
- MACHINE_BINOP_64_LIST(V) \
- MACHINE_COMPARE_BINOP_LIST(V) \
- MACHINE_FLOAT32_BINOP_LIST(V) \
- MACHINE_FLOAT32_UNOP_LIST(V) \
- MACHINE_FLOAT64_BINOP_LIST(V) \
- MACHINE_FLOAT64_UNOP_LIST(V) \
- V(DebugAbort) \
- V(DebugBreak) \
- V(Comment) \
- V(Load) \
- V(Store) \
- V(StackSlot) \
- V(Word32Popcnt) \
- V(Word64Popcnt) \
- V(Word64Clz) \
- V(Word64Ctz) \
- V(Word64ReverseBits) \
- V(Word64ReverseBytes) \
- V(Int64AbsWithOverflow) \
- V(BitcastTaggedToWord) \
- V(BitcastWordToTagged) \
- V(BitcastWordToTaggedSigned) \
- V(TruncateFloat64ToWord32) \
- V(ChangeFloat32ToFloat64) \
- V(ChangeFloat64ToInt32) \
- V(ChangeFloat64ToUint32) \
- V(ChangeFloat64ToUint64) \
- V(Float64SilenceNaN) \
- V(TruncateFloat64ToUint32) \
- V(TruncateFloat32ToInt32) \
- V(TruncateFloat32ToUint32) \
- V(TryTruncateFloat32ToInt64) \
- V(TryTruncateFloat64ToInt64) \
- V(TryTruncateFloat32ToUint64) \
- V(TryTruncateFloat64ToUint64) \
- V(ChangeInt32ToFloat64) \
- V(ChangeInt32ToInt64) \
- V(ChangeUint32ToFloat64) \
- V(ChangeUint32ToUint64) \
- V(TruncateFloat64ToFloat32) \
- V(TruncateInt64ToInt32) \
- V(RoundFloat64ToInt32) \
- V(RoundInt32ToFloat32) \
- V(RoundInt64ToFloat32) \
- V(RoundInt64ToFloat64) \
- V(RoundUint32ToFloat32) \
- V(RoundUint64ToFloat32) \
- V(RoundUint64ToFloat64) \
- V(BitcastFloat32ToInt32) \
- V(BitcastFloat64ToInt64) \
- V(BitcastInt32ToFloat32) \
- V(BitcastInt64ToFloat64) \
- V(Float64ExtractLowWord32) \
- V(Float64ExtractHighWord32) \
- V(Float64InsertLowWord32) \
- V(Float64InsertHighWord32) \
- V(LoadStackPointer) \
- V(LoadFramePointer) \
- V(LoadParentFramePointer) \
- V(UnalignedLoad) \
- V(UnalignedStore) \
- V(Int32PairAdd) \
- V(Int32PairSub) \
- V(Int32PairMul) \
- V(Word32PairShl) \
- V(Word32PairShr) \
- V(Word32PairSar) \
- V(ProtectedLoad) \
- V(ProtectedStore) \
- V(AtomicLoad) \
- V(AtomicStore) \
- V(AtomicExchange) \
- V(AtomicCompareExchange) \
- V(AtomicAdd) \
- V(AtomicSub) \
- V(AtomicAnd) \
- V(AtomicOr) \
- V(AtomicXor) \
- V(SpeculationFence) \
+#define MACHINE_OP_LIST(V) \
+ MACHINE_UNOP_32_LIST(V) \
+ MACHINE_BINOP_32_LIST(V) \
+ MACHINE_BINOP_64_LIST(V) \
+ MACHINE_COMPARE_BINOP_LIST(V) \
+ MACHINE_FLOAT32_BINOP_LIST(V) \
+ MACHINE_FLOAT32_UNOP_LIST(V) \
+ MACHINE_FLOAT64_BINOP_LIST(V) \
+ MACHINE_FLOAT64_UNOP_LIST(V) \
+ V(DebugAbort) \
+ V(DebugBreak) \
+ V(Comment) \
+ V(Load) \
+ V(PoisonedLoad) \
+ V(Store) \
+ V(StackSlot) \
+ V(Word32Popcnt) \
+ V(Word64Popcnt) \
+ V(Word64Clz) \
+ V(Word64Ctz) \
+ V(Word64ReverseBits) \
+ V(Word64ReverseBytes) \
+ V(Int64AbsWithOverflow) \
+ V(BitcastTaggedToWord) \
+ V(BitcastWordToTagged) \
+ V(BitcastWordToTaggedSigned) \
+ V(TruncateFloat64ToWord32) \
+ V(ChangeFloat32ToFloat64) \
+ V(ChangeFloat64ToInt32) \
+ V(ChangeFloat64ToUint32) \
+ V(ChangeFloat64ToUint64) \
+ V(Float64SilenceNaN) \
+ V(TruncateFloat64ToUint32) \
+ V(TruncateFloat32ToInt32) \
+ V(TruncateFloat32ToUint32) \
+ V(TryTruncateFloat32ToInt64) \
+ V(TryTruncateFloat64ToInt64) \
+ V(TryTruncateFloat32ToUint64) \
+ V(TryTruncateFloat64ToUint64) \
+ V(ChangeInt32ToFloat64) \
+ V(ChangeInt32ToInt64) \
+ V(ChangeUint32ToFloat64) \
+ V(ChangeUint32ToUint64) \
+ V(TruncateFloat64ToFloat32) \
+ V(TruncateInt64ToInt32) \
+ V(RoundFloat64ToInt32) \
+ V(RoundInt32ToFloat32) \
+ V(RoundInt64ToFloat32) \
+ V(RoundInt64ToFloat64) \
+ V(RoundUint32ToFloat32) \
+ V(RoundUint64ToFloat32) \
+ V(RoundUint64ToFloat64) \
+ V(BitcastFloat32ToInt32) \
+ V(BitcastFloat64ToInt64) \
+ V(BitcastInt32ToFloat32) \
+ V(BitcastInt64ToFloat64) \
+ V(Float64ExtractLowWord32) \
+ V(Float64ExtractHighWord32) \
+ V(Float64InsertLowWord32) \
+ V(Float64InsertHighWord32) \
+ V(SpeculationPoison) \
+ V(LoadStackPointer) \
+ V(LoadFramePointer) \
+ V(LoadParentFramePointer) \
+ V(UnalignedLoad) \
+ V(UnalignedStore) \
+ V(Int32PairAdd) \
+ V(Int32PairSub) \
+ V(Int32PairMul) \
+ V(Word32PairShl) \
+ V(Word32PairShr) \
+ V(Word32PairSar) \
+ V(ProtectedLoad) \
+ V(ProtectedStore) \
+ V(Word32AtomicLoad) \
+ V(Word32AtomicStore) \
+ V(Word32AtomicExchange) \
+ V(Word32AtomicCompareExchange) \
+ V(Word32AtomicAdd) \
+ V(Word32AtomicSub) \
+ V(Word32AtomicAnd) \
+ V(Word32AtomicOr) \
+ V(Word32AtomicXor) \
+ V(SpeculationFence) \
+ V(SignExtendWord8ToInt32) \
+ V(SignExtendWord16ToInt32) \
+ V(SignExtendWord8ToInt64) \
+ V(SignExtendWord16ToInt64) \
+ V(SignExtendWord32ToInt64) \
V(UnsafePointerAdd)
#define MACHINE_SIMD_OP_LIST(V) \
diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc
index 226faeaa82..fc774f8706 100644
--- a/deps/v8/src/compiler/operation-typer.cc
+++ b/deps/v8/src/compiler/operation-typer.cc
@@ -510,7 +510,7 @@ Type* OperationTyper::NumberToString(Type* type) {
if (type->IsNone()) return type;
if (type->Is(Type::NaN())) return singleton_NaN_string_;
if (type->Is(cache_.kZeroOrMinusZero)) return singleton_zero_string_;
- return Type::SeqString();
+ return Type::String();
}
Type* OperationTyper::NumberToUint32(Type* type) {
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index d786bb3ee5..bd715df25e 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -115,6 +115,9 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSDecrement:
case IrOpcode::kJSIncrement:
case IrOpcode::kJSNegate:
+ case IrOpcode::kJSPromiseResolve:
+ case IrOpcode::kJSRejectPromise:
+ case IrOpcode::kJSResolvePromise:
return true;
default:
diff --git a/deps/v8/src/compiler/persistent-map.h b/deps/v8/src/compiler/persistent-map.h
index ab0eebbbdd..e94165b36a 100644
--- a/deps/v8/src/compiler/persistent-map.h
+++ b/deps/v8/src/compiler/persistent-map.h
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_PERSISTENT_H_
-#define V8_COMPILER_PERSISTENT_H_
+#ifndef V8_COMPILER_PERSISTENT_MAP_H_
+#define V8_COMPILER_PERSISTENT_MAP_H_
#include <array>
-#include <bitset>
#include <tuple>
#include "src/base/functional.h"
@@ -74,8 +73,7 @@ class PersistentMap {
}
// Add or overwrite an existing key-value pair.
- PersistentMap Add(Key key, Value value) const;
- void Set(Key key, Value value) { *this = Add(key, value); }
+ void Set(Key key, Value value);
bool operator==(const PersistentMap& other) const {
if (tree_ == other.tree_) return true;
@@ -202,17 +200,16 @@ struct PersistentMap<Key, Value, Hasher>::FocusedTree {
template <class Key, class Value, class Hasher>
class PersistentMap<Key, Value, Hasher>::HashValue {
public:
- explicit HashValue(size_t hash) : bits_(hash) {}
- explicit HashValue(std::bitset<kHashBits> hash) : bits_(hash) {}
+ explicit HashValue(size_t hash) : bits_(static_cast<uint32_t>(hash)) {}
Bit operator[](int pos) const {
- return bits_[kHashBits - pos - 1] ? kRight : kLeft;
+ DCHECK_LT(pos, kHashBits);
+ return bits_ & (static_cast<decltype(bits_)>(1) << (kHashBits - pos - 1))
+ ? kRight
+ : kLeft;
}
- bool operator<(HashValue other) const {
- static_assert(sizeof(*this) <= sizeof(unsigned long), ""); // NOLINT
- return bits_.to_ulong() < other.bits_.to_ulong();
- }
+ bool operator<(HashValue other) const { return bits_ < other.bits_; }
bool operator==(HashValue other) const { return bits_ == other.bits_; }
bool operator!=(HashValue other) const { return bits_ != other.bits_; }
HashValue operator^(HashValue other) const {
@@ -220,7 +217,8 @@ class PersistentMap<Key, Value, Hasher>::HashValue {
}
private:
- std::bitset<kHashBits> bits_;
+ static_assert(sizeof(uint32_t) * 8 == kHashBits, "wrong type for bits_");
+ uint32_t bits_;
};
template <class Key, class Value, class Hasher>
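In the HashValue hunk above, the std::bitset is replaced by a raw uint32_t, and operator[] indexes bits from the most significant end: position 0 is bit 31, so the map's tree is descended on the high bits of the hash first. The same indexing as a stand-alone sketch (free function introduced here for illustration, mirroring the expression in the hunk):

    #include <cstdint>

    constexpr int kHashBits = 32;

    // Bit `pos` counted from the most significant end: pos 0 is bit 31.
    // Mirrors HashValue::operator[] above, where 0 maps to kLeft and 1 to kRight.
    int HashBit(uint32_t bits, int pos) {
      return (bits >> (kHashBits - pos - 1)) & 1;
    }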
@@ -263,7 +261,7 @@ class PersistentMap<Key, Value, Hasher>::iterator {
if (current_->more) {
more_iter_ = current_->more->begin();
}
- } while ((**this).second == def_value());
+ } while (!((**this).second != def_value()));
return *this;
}
@@ -281,12 +279,10 @@ class PersistentMap<Key, Value, Hasher>::iterator {
bool operator<(const iterator& other) const {
if (is_end()) return false;
if (other.is_end()) return true;
- if (current_->key_hash < other.current_->key_hash) {
- return true;
- } else if (current_->key_hash == other.current_->key_hash) {
+ if (current_->key_hash == other.current_->key_hash) {
return (**this).first < (*other).first;
} else {
- return false;
+ return current_->key_hash < other.current_->key_hash;
}
}
@@ -300,6 +296,9 @@ class PersistentMap<Key, Value, Hasher>::iterator {
if (i.current_->more) {
i.more_iter_ = i.current_->more->begin();
}
+ // Skip entries with default value. PersistentMap iterators must never point
+ // to a default value.
+ while (!i.is_end() && !((*i).second != def_value)) ++i;
return i;
}
@@ -333,8 +332,18 @@ class PersistentMap<Key, Value, Hasher>::double_iterator {
}
double_iterator& operator++() {
- if (first_current_) ++first_;
- if (second_current_) ++second_;
+#ifdef DEBUG
+ iterator old_first = first_;
+ iterator old_second = second_;
+#endif
+ if (first_current_) {
+ ++first_;
+ DCHECK(old_first < first_);
+ }
+ if (second_current_) {
+ ++second_;
+ DCHECK(old_second < second_);
+ }
return *this = double_iterator(first_, second_);
}
@@ -346,6 +355,7 @@ class PersistentMap<Key, Value, Hasher>::double_iterator {
first_current_ = true;
second_current_ = false;
} else {
+ DCHECK(second_ < first_);
first_current_ = false;
second_current_ = true;
}
@@ -365,14 +375,13 @@ class PersistentMap<Key, Value, Hasher>::double_iterator {
};
template <class Key, class Value, class Hasher>
-PersistentMap<Key, Value, Hasher> PersistentMap<Key, Value, Hasher>::Add(
- Key key, Value value) const {
+void PersistentMap<Key, Value, Hasher>::Set(Key key, Value value) {
HashValue key_hash = HashValue(Hasher()(key));
std::array<const FocusedTree*, kHashBits> path;
int length = 0;
const FocusedTree* old = FindHash(key_hash, &path, &length);
ZoneMap<Key, Value>* more = nullptr;
- if (GetFocusedValue(old, key) == value) return *this;
+ if (!(GetFocusedValue(old, key) != value)) return;
if (old && !(old->more == nullptr && old->key_value.key() == key)) {
more = new (zone_->New(sizeof(*more))) ZoneMap<Key, Value>(zone_);
if (old->more) {
@@ -393,7 +402,7 @@ PersistentMap<Key, Value, Hasher> PersistentMap<Key, Value, Hasher>::Add(
for (int i = 0; i < length; ++i) {
tree->path(i) = path[i];
}
- return PersistentMap(tree, zone_, def_value_);
+ *this = PersistentMap(tree, zone_, def_value_);
}
template <class Key, class Value, class Hasher>
diff --git a/deps/v8/src/compiler/pipeline-statistics.h b/deps/v8/src/compiler/pipeline-statistics.h
index b2bf3ac76a..900cd1cd8f 100644
--- a/deps/v8/src/compiler/pipeline-statistics.h
+++ b/deps/v8/src/compiler/pipeline-statistics.h
@@ -98,4 +98,4 @@ class PhaseScope {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_COMPILER_PIPELINE_STATISTICS_H_
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index b4567ab04f..fe29917e61 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -83,6 +83,11 @@ struct ProtectedInstructionData;
namespace compiler {
+// Turbofan can only handle 2^16 control inputs. Since each control flow split
+// requires at least two bytes (jump and offset), we limit the bytecode size
+// to 128K bytes.
+const int kMaxBytecodeSizeForTurbofan = 128 * 1024;
+
class PipelineData {
public:
// For main entry point.
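The kMaxBytecodeSizeForTurbofan constant introduced in the hunk above follows directly from its comment: with at most 2^16 control inputs and at least two bytecode bytes (jump plus offset) per control-flow split, 2^16 * 2 bytes = 128 KiB is the largest bytecode array guaranteed to stay within the limit. As a sanity check (the static_assert below is only an illustration, not part of the patch):

    // 2^16 control-flow splits * 2 bytes per split = 128 KiB.
    const int kMaxBytecodeSizeForTurbofan = 128 * 1024;
    static_assert(kMaxBytecodeSizeForTurbofan == (1 << 16) * 2,
                  "limit matches two bytes per control-flow split");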
@@ -292,32 +297,32 @@ class PipelineData {
register_allocation_data_ = nullptr;
}
- void InitializeInstructionSequence(const CallDescriptor* descriptor) {
+ void InitializeInstructionSequence(const CallDescriptor* call_descriptor) {
DCHECK_NULL(sequence_);
InstructionBlocks* instruction_blocks =
InstructionSequence::InstructionBlocksFor(instruction_zone(),
schedule());
sequence_ = new (instruction_zone())
InstructionSequence(isolate(), instruction_zone(), instruction_blocks);
- if (descriptor && descriptor->RequiresFrameAsIncoming()) {
+ if (call_descriptor && call_descriptor->RequiresFrameAsIncoming()) {
sequence_->instruction_blocks()[0]->mark_needs_frame();
} else {
- DCHECK_EQ(0u, descriptor->CalleeSavedFPRegisters());
- DCHECK_EQ(0u, descriptor->CalleeSavedRegisters());
+ DCHECK_EQ(0u, call_descriptor->CalleeSavedFPRegisters());
+ DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters());
}
}
- void InitializeFrameData(CallDescriptor* descriptor) {
+ void InitializeFrameData(CallDescriptor* call_descriptor) {
DCHECK_NULL(frame_);
int fixed_frame_size = 0;
- if (descriptor != nullptr) {
- fixed_frame_size = descriptor->CalculateFixedFrameSize();
+ if (call_descriptor != nullptr) {
+ fixed_frame_size = call_descriptor->CalculateFixedFrameSize();
}
frame_ = new (codegen_zone()) Frame(fixed_frame_size);
}
void InitializeRegisterAllocationData(const RegisterConfiguration* config,
- CallDescriptor* descriptor) {
+ CallDescriptor* call_descriptor) {
DCHECK_NULL(register_allocation_data_);
register_allocation_data_ = new (register_allocation_zone())
RegisterAllocationData(config, register_allocation_zone(), frame(),
@@ -336,10 +341,12 @@ class PipelineData {
void InitializeCodeGenerator(Linkage* linkage) {
DCHECK_NULL(code_generator_);
- code_generator_ =
- new CodeGenerator(codegen_zone(), frame(), linkage, sequence(), info(),
- isolate(), osr_helper_, start_source_position_,
- jump_optimization_info_, protected_instructions_);
+ code_generator_ = new CodeGenerator(
+ codegen_zone(), frame(), linkage, sequence(), info(), isolate(),
+ osr_helper_, start_source_position_, jump_optimization_info_,
+ protected_instructions_,
+ info()->is_poison_loads() ? LoadPoisoning::kDoPoison
+ : LoadPoisoning::kDontPoison);
}
void BeginPhaseKind(const char* phase_kind_name) {
@@ -451,7 +458,7 @@ class PipelineImpl final {
void RunPrintAndVerify(const char* phase, bool untyped = false);
Handle<Code> GenerateCode(CallDescriptor* call_descriptor);
void AllocateRegisters(const RegisterConfiguration* config,
- CallDescriptor* descriptor, bool run_verifier);
+ CallDescriptor* call_descriptor, bool run_verifier);
CompilationInfo* info() const;
Isolate* isolate() const;
@@ -778,6 +785,11 @@ class PipelineCompilationJob final : public CompilationJob {
PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
Isolate* isolate) {
+ if (compilation_info()->shared_info()->bytecode_array()->length() >
+ kMaxBytecodeSizeForTurbofan) {
+ return AbortOptimization(BailoutReason::kFunctionTooBig);
+ }
+
if (!FLAG_always_opt) {
compilation_info()->MarkAsBailoutOnUninitialized();
}
@@ -790,7 +802,10 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
if (FLAG_inline_accessors) {
compilation_info()->MarkAsAccessorInliningEnabled();
}
- if (compilation_info()->closure()->feedback_vector_cell()->map() ==
+ if (FLAG_branch_load_poisoning) {
+ compilation_info()->MarkAsPoisonLoads();
+ }
+ if (compilation_info()->closure()->feedback_cell()->map() ==
isolate->heap()->one_closure_cell_map()) {
compilation_info()->MarkAsFunctionContextSpecializing();
}
@@ -895,7 +910,7 @@ class PipelineWasmCompilationJob final : public CompilationJob {
public:
explicit PipelineWasmCompilationJob(
CompilationInfo* info, Isolate* isolate, JSGraph* jsgraph,
- CallDescriptor* descriptor, SourcePositionTable* source_positions,
+ CallDescriptor* call_descriptor, SourcePositionTable* source_positions,
std::vector<trap_handler::ProtectedInstructionData>* protected_insts,
bool asmjs_origin)
: CompilationJob(isolate->stack_guard()->real_climit(), nullptr, info,
@@ -906,7 +921,7 @@ class PipelineWasmCompilationJob final : public CompilationJob {
data_(&zone_stats_, isolate, info, jsgraph, pipeline_statistics_.get(),
source_positions, protected_insts),
pipeline_(&data_),
- linkage_(descriptor),
+ linkage_(call_descriptor),
asmjs_origin_(asmjs_origin) {}
protected:
@@ -955,7 +970,8 @@ PipelineWasmCompilationJob::ExecuteJobImpl() {
ValueNumberingReducer value_numbering(scope.zone(), data->graph()->zone());
MachineOperatorReducer machine_reducer(data->jsgraph(), asmjs_origin_);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->common(), data->machine());
+ data->common(), data->machine(),
+ scope.zone());
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &machine_reducer);
AddReducer(data, &graph_reducer, &common_reducer);
@@ -986,11 +1002,12 @@ PipelineWasmCompilationJob::Status PipelineWasmCompilationJob::FinalizeJobImpl(
code_generator->tasm()->GetCode(isolate, &wasm_code_desc->code_desc);
wasm_code_desc->safepoint_table_offset =
code_generator->GetSafepointTableOffset();
+ wasm_code_desc->handler_table_offset =
+ code_generator->GetHandlerTableOffset();
wasm_code_desc->frame_slot_count =
code_generator->frame()->GetTotalFrameSlotCount();
wasm_code_desc->source_positions_table =
code_generator->GetSourcePositionTable();
- wasm_code_desc->handler_table = code_generator->GetHandlerTable();
}
return SUCCEEDED;
}
@@ -1113,7 +1130,8 @@ struct InliningPhase {
data->common(), temp_zone);
CheckpointElimination checkpoint_elimination(&graph_reducer);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->common(), data->machine());
+ data->common(), data->machine(),
+ temp_zone);
JSCallReducer call_reducer(&graph_reducer, data->jsgraph(),
data->info()->is_bailout_on_uninitialized()
? JSCallReducer::kBailoutOnUninitialized
@@ -1217,7 +1235,8 @@ struct TypedLoweringPhase {
SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph());
CheckpointElimination checkpoint_elimination(&graph_reducer);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->common(), data->machine());
+ data->common(), data->machine(),
+ temp_zone);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &builtin_reducer);
AddReducer(data, &graph_reducer, &create_lowering);
@@ -1324,7 +1343,8 @@ struct EarlyOptimizationPhase {
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
MachineOperatorReducer machine_reducer(data->jsgraph());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->common(), data->machine());
+ data->common(), data->machine(),
+ temp_zone);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &simple_reducer);
AddReducer(data, &graph_reducer, &redundancy_elimination);
@@ -1391,7 +1411,8 @@ struct EffectControlLinearizationPhase {
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->common(), data->machine());
+ data->common(), data->machine(),
+ temp_zone);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
graph_reducer.ReduceGraph();
@@ -1427,7 +1448,8 @@ struct LoadEliminationPhase {
CheckpointElimination checkpoint_elimination(&graph_reducer);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->common(), data->machine());
+ data->common(), data->machine(),
+ temp_zone);
AddReducer(data, &graph_reducer, &branch_condition_elimination);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &redundancy_elimination);
@@ -1450,7 +1472,10 @@ struct MemoryOptimizationPhase {
trimmer.TrimGraph(roots.begin(), roots.end());
// Optimize allocations and load/store operations.
- MemoryOptimizer optimizer(data->jsgraph(), temp_zone);
+ MemoryOptimizer optimizer(data->jsgraph(), temp_zone,
+ data->info()->is_poison_loads()
+ ? LoadPoisoning::kDoPoison
+ : LoadPoisoning::kDontPoison);
optimizer.Optimize();
}
};
@@ -1467,7 +1492,8 @@ struct LateOptimizationPhase {
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
MachineOperatorReducer machine_reducer(data->jsgraph());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->common(), data->machine());
+ data->common(), data->machine(),
+ temp_zone);
SelectLowering select_lowering(data->jsgraph()->graph(),
data->jsgraph()->common());
AddReducer(data, &graph_reducer, &branch_condition_elimination);
@@ -1525,6 +1551,12 @@ struct InstructionSelectionPhase {
InstructionSelector selector(
temp_zone, data->graph()->NodeCount(), linkage, data->sequence(),
data->schedule(), data->source_positions(), data->frame(),
+ data->info()->switch_jump_table_enabled()
+ ? InstructionSelector::kEnableSwitchJumpTable
+ : InstructionSelector::kDisableSwitchJumpTable,
+ data->info()->is_generating_speculation_poison_on_entry()
+ ? InstructionSelector::kEnableSpeculationPoison
+ : InstructionSelector::kDisableSpeculationPoison,
data->info()->is_source_positions_enabled()
? InstructionSelector::kAllSourcePositions
: InstructionSelector::kCallSourcePositions,
@@ -1534,7 +1566,9 @@ struct InstructionSelectionPhase {
: InstructionSelector::kDisableScheduling,
data->isolate()->serializer_enabled()
? InstructionSelector::kEnableSerialization
- : InstructionSelector::kDisableSerialization);
+ : InstructionSelector::kDisableSerialization,
+ data->info()->is_poison_loads() ? LoadPoisoning::kDoPoison
+ : LoadPoisoning::kDontPoison);
if (!selector.SelectInstructions()) {
data->set_compilation_failed();
}
@@ -2012,8 +2046,7 @@ Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
Isolate* isolate, Graph* graph,
Schedule* schedule) {
- CallDescriptor* call_descriptor =
- Linkage::ComputeIncoming(info->zone(), info);
+ auto call_descriptor = Linkage::ComputeIncoming(info->zone(), info);
return GenerateCodeForTesting(info, isolate, call_descriptor, graph,
schedule);
}
@@ -2071,10 +2104,10 @@ CompilationJob* Pipeline::NewCompilationJob(Handle<JSFunction> function,
// static
CompilationJob* Pipeline::NewWasmCompilationJob(
CompilationInfo* info, Isolate* isolate, JSGraph* jsgraph,
- CallDescriptor* descriptor, SourcePositionTable* source_positions,
+ CallDescriptor* call_descriptor, SourcePositionTable* source_positions,
std::vector<trap_handler::ProtectedInstructionData>* protected_instructions,
wasm::ModuleOrigin asmjs_origin) {
- return new PipelineWasmCompilationJob(info, isolate, jsgraph, descriptor,
+ return new PipelineWasmCompilationJob(info, isolate, jsgraph, call_descriptor,
source_positions,
protected_instructions, asmjs_origin);
}
@@ -2105,7 +2138,7 @@ void PipelineImpl::ComputeScheduledGraph() {
}
bool PipelineImpl::SelectInstructions(Linkage* linkage) {
- CallDescriptor* call_descriptor = linkage->GetIncomingDescriptor();
+ auto call_descriptor = linkage->GetIncomingDescriptor();
PipelineData* data = this->data_;
// We should have a scheduled graph.
@@ -2186,6 +2219,10 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
std::unique_ptr<const RegisterConfiguration> config;
config.reset(RegisterConfiguration::RestrictGeneralRegisters(registers));
AllocateRegisters(config.get(), call_descriptor, run_verifier);
+ } else if (data->info()->is_poison_loads()) {
+ CHECK(InstructionSelector::SupportsSpeculationPoisoning());
+ AllocateRegisters(RegisterConfiguration::Poisoning(), call_descriptor,
+ run_verifier);
} else {
AllocateRegisters(RegisterConfiguration::Default(), call_descriptor,
run_verifier);
@@ -2276,7 +2313,7 @@ Handle<Code> PipelineImpl::GenerateCode(CallDescriptor* call_descriptor) {
}
void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
- CallDescriptor* descriptor,
+ CallDescriptor* call_descriptor,
bool run_verifier) {
PipelineData* data = this->data_;
// Don't track usage for this zone in compiler stats.
@@ -2294,7 +2331,7 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
data_->sequence()->ValidateDeferredBlockExitPaths();
#endif
- data->InitializeRegisterAllocationData(config, descriptor);
+ data->InitializeRegisterAllocationData(config, call_descriptor);
if (info()->is_osr()) data->osr_helper()->SetupFrame(data->frame());
Run<MeetRegisterConstraintsPhase>();
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index b5b6b5f142..92b128c357 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -46,7 +46,7 @@ class Pipeline : public AllStatic {
// Returns a new compilation job for the WebAssembly compilation info.
static CompilationJob* NewWasmCompilationJob(
CompilationInfo* info, Isolate* isolate, JSGraph* jsgraph,
- CallDescriptor* descriptor, SourcePositionTable* source_positions,
+ CallDescriptor* call_descriptor, SourcePositionTable* source_positions,
std::vector<trap_handler::ProtectedInstructionData>*
protected_instructions,
wasm::ModuleOrigin wasm_origin);
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index 7fc537784c..6bdf8fa974 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -34,7 +34,9 @@ class PPCOperandConverter final : public InstructionOperandConverter {
RCBit OutputRCBit() const {
switch (instr_->flags_mode()) {
case kFlags_branch:
+ case kFlags_branch_and_poison:
case kFlags_deoptimize:
+ case kFlags_deoptimize_and_poison:
case kFlags_set:
case kFlags_trap:
return SetRC;
@@ -270,6 +272,16 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
UNREACHABLE();
}
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
+ PPCOperandConverter& i) {
+ const MemoryAccessMode access_mode =
+ static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode()));
+ if (access_mode == kMemoryAccessPoisoned) {
+ Register value = i.OutputRegister();
+ codegen->tasm()->and_(value, value, kSpeculationPoisonRegister);
+ }
+}
+
} // namespace
#define ASSEMBLE_FLOAT_UNOP_RC(asm_instr, round) \
@@ -779,21 +791,40 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
first_unused_stack_slot);
}
+// Check that {kJavaScriptCallCodeStartRegister} is correct.
+void CodeGenerator::AssembleCodeStartRegisterCheck() {
+ Register scratch = kScratchReg;
+
+ Label current_pc;
+ __ mov_label_addr(scratch, &current_pc);
+
+ __ bind(&current_pc);
+ __ subi(scratch, scratch, Operand(__ pc_offset()));
+ __ cmp(scratch, kJavaScriptCallCodeStartRegister);
+ __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
+}
+
// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
-// 1. load the address of the current instruction;
-// 2. read from memory the word that contains that bit, which can be found in
+// 1. read from memory the word that contains that bit, which can be found in
// the flags in the referenced {CodeDataContainer} object;
-// 3. test kMarkedForDeoptimizationBit in those flags; and
-// 4. if it is not zero then it jumps to the builtin.
+// 2. test kMarkedForDeoptimizationBit in those flags; and
+// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
- Label current;
- __ mov_label_addr(r11, &current);
- int pc_offset = __ pc_offset();
- __ bind(&current);
- int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc_offset);
- __ LoadP(r11, MemOperand(r11, offset));
+ if (FLAG_debug_code) {
+ // Check that {kJavaScriptCallCodeStartRegister} is correct.
+ Label current_pc;
+ __ mov_label_addr(ip, &current_pc);
+
+ __ bind(&current_pc);
+ __ subi(ip, ip, Operand(__ pc_offset()));
+ __ cmp(ip, kJavaScriptCallCodeStartRegister);
+ __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
+ }
+
+ int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+ __ LoadP(r11, MemOperand(kJavaScriptCallCodeStartRegister, offset));
__ LoadWordArith(
r11, FieldMemOperand(r11, CodeDataContainer::kKindSpecificFlagsOffset));
__ TestBit(r11, Code::kMarkedForDeoptimizationBit);
@@ -802,6 +833,37 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ Jump(code, RelocInfo::CODE_TARGET, ne, cr0);
}
+void CodeGenerator::GenerateSpeculationPoison() {
+ Register scratch = kScratchReg;
+
+ Label current_pc;
+ __ mov_label_addr(scratch, &current_pc);
+
+ __ bind(&current_pc);
+ __ subi(scratch, scratch, Operand(__ pc_offset()));
+
+ // Calculate a mask which has all bits set in the normal case, but has all
+ // bits cleared if we are speculatively executing the wrong PC.
+ // difference = (current - expected) | (expected - current)
+ // poison = ~(difference >> (kBitsPerPointer - 1))
+ __ mr(kSpeculationPoisonRegister, scratch);
+ __ sub(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kJavaScriptCallCodeStartRegister);
+ __ sub(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
+ scratch);
+ __ orx(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kJavaScriptCallCodeStartRegister);
+ __ ShiftRightArithImm(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kBitsPerPointer - 1);
+ __ notx(kSpeculationPoisonRegister, kSpeculationPoisonRegister);
+}
+
+void CodeGenerator::AssembleRegisterArgumentPoisoning() {
+ __ and_(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
+ __ and_(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
+ __ and_(sp, sp, kSpeculationPoisonRegister);
+}
+
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
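GenerateSpeculationPoison above builds a mask that is all ones when the actual PC matches kJavaScriptCallCodeStartRegister and all zeros when it does not; EmitWordLoadPoisoningIfNeeded and AssembleRegisterArgumentPoisoning then AND that mask into load results and incoming registers, so mis-speculated code only ever observes zeroed values. The arithmetic in the comment (difference = (current - expected) | (expected - current); poison = ~(difference >> (kBitsPerPointer - 1))) can be checked in plain C++; the sketch below assumes 64-bit values and uses a helper name introduced here:

    #include <cstdint>

    // All ones when current == expected, all zeros otherwise: if the values
    // differ, at least one of the two subtractions has its sign bit set, the
    // arithmetic shift smears that bit across the word, and the final NOT
    // turns it into zero.
    uint64_t SpeculationPoisonMask(uint64_t current, uint64_t expected) {
      uint64_t difference = (current - expected) | (expected - current);
      uint64_t smeared =
          static_cast<uint64_t>(static_cast<int64_t>(difference) >> 63);
      return ~smeared;
    }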
@@ -909,9 +971,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cmp(cp, kScratchReg);
__ Assert(eq, AbortReason::kWrongFunctionContext);
}
- __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeOffset));
- __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
+ __ LoadP(r5, FieldMemOperand(func, JSFunction::kCodeOffset));
+ __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(r5);
RecordCallPosition(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
frame_access_state()->ClearSPDelta();
@@ -1808,26 +1871,33 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif
case kPPC_LoadWordU8:
ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordS8:
ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
__ extsb(i.OutputRegister(), i.OutputRegister());
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordU16:
ASSEMBLE_LOAD_INTEGER(lhz, lhzx);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordS16:
ASSEMBLE_LOAD_INTEGER(lha, lhax);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordU32:
ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordS32:
ASSEMBLE_LOAD_INTEGER(lwa, lwax);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
#if V8_TARGET_ARCH_PPC64
case kPPC_LoadWord64:
ASSEMBLE_LOAD_INTEGER(ld, ldx);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
#endif
case kPPC_LoadFloat32:
@@ -1856,47 +1926,47 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kPPC_StoreDouble:
ASSEMBLE_STORE_DOUBLE();
break;
- case kAtomicLoadInt8:
+ case kWord32AtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lbz, lbzx);
__ extsb(i.OutputRegister(), i.OutputRegister());
break;
- case kAtomicLoadUint8:
+ case kWord32AtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lbz, lbzx);
break;
- case kAtomicLoadInt16:
+ case kWord32AtomicLoadInt16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lha, lhax);
break;
- case kAtomicLoadUint16:
+ case kWord32AtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lhz, lhzx);
break;
- case kAtomicLoadWord32:
+ case kWord32AtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lwz, lwzx);
break;
- case kAtomicStoreWord8:
+ case kWord32AtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(stb, stbx);
break;
- case kAtomicStoreWord16:
+ case kWord32AtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(sth, sthx);
break;
- case kAtomicStoreWord32:
+ case kWord32AtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(stw, stwx);
break;
- case kAtomicExchangeInt8:
+ case kWord32AtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
__ extsb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kAtomicExchangeUint8:
+ case kWord32AtomicExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
break;
- case kAtomicExchangeInt16:
+ case kWord32AtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lharx, sthcx);
__ extsh(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kAtomicExchangeUint16:
+ case kWord32AtomicExchangeUint16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lharx, sthcx);
break;
- case kAtomicExchangeWord32:
+ case kWord32AtomicExchangeWord32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lwarx, stwcx);
break;
default:
@@ -1931,6 +2001,20 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
}
+void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
+ Instruction* instr) {
+ // TODO(John) Handle float comparisons (kUnordered[Not]Equal).
+ if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
+ return;
+ }
+
+ ArchOpcode op = instr->arch_opcode();
+ condition = NegateFlagsCondition(condition);
+ __ li(kScratchReg, Operand::Zero());
+ __ isel(FlagsConditionToCondition(condition, op), kSpeculationPoisonRegister,
+ kScratchReg, kSpeculationPoisonRegister, cr0);
+}
+
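AssembleBranchPoisoning above keeps the poison mask only when the condition established by the preceding compare actually holds: the condition is negated, and isel then moves the zeroed scratch register into kSpeculationPoisonRegister whenever that negated condition is true. A short C++ sketch of the same select (condition_holds stands for the flags state; the function name is illustrative, not a V8 API):

  #include <cstdint>

  // Models the li/isel pair: clear the poison mask on the path the branch
  // did not actually take, keep it otherwise.
  uint64_t UpdatePoisonAfterBranch(uint64_t poison, bool condition_holds) {
    const uint64_t zero_scratch = 0;  // "__ li(kScratchReg, Operand::Zero())"
    return condition_holds ? poison : zero_scratch;
  }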
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -1978,8 +2062,9 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ isolate()),
0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
- CallDescriptor* descriptor = gen_->linkage()->GetIncomingDescriptor();
- int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
+ int pop_count =
+ static_cast<int>(call_descriptor->StackParameterCount());
__ Drop(pop_count);
__ Ret();
} else {
@@ -2109,8 +2194,8 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
void CodeGenerator::FinishFrame(Frame* frame) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- const RegList double_saves = descriptor->CalleeSavedFPRegisters();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+ const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
// Save callee-saved Double registers.
if (double_saves != 0) {
@@ -2121,10 +2206,10 @@ void CodeGenerator::FinishFrame(Frame* frame) {
(kDoubleSize / kPointerSize));
}
// Save callee-saved registers.
- const RegList saves =
- FLAG_enable_embedded_constant_pool
- ? descriptor->CalleeSavedRegisters() & ~kConstantPoolRegister.bit()
- : descriptor->CalleeSavedRegisters();
+ const RegList saves = FLAG_enable_embedded_constant_pool
+ ? call_descriptor->CalleeSavedRegisters() &
+ ~kConstantPoolRegister.bit()
+ : call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
// register save area does not include the fp or constant pool pointer.
const int num_saves =
@@ -2135,9 +2220,9 @@ void CodeGenerator::FinishFrame(Frame* frame) {
}
void CodeGenerator::AssembleConstructFrame() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
__ function_descriptor();
__ mflr(r0);
if (FLAG_enable_embedded_constant_pool) {
@@ -2148,9 +2233,9 @@ void CodeGenerator::AssembleConstructFrame() {
__ Push(r0, fp);
__ mr(fp, sp);
}
- } else if (descriptor->IsJSFunctionCall()) {
- __ Prologue(ip);
- if (descriptor->PushArgumentCount()) {
+ } else if (call_descriptor->IsJSFunctionCall()) {
+ __ Prologue();
+ if (call_descriptor->PushArgumentCount()) {
__ Push(kJavaScriptCallArgCountRegister);
}
} else {
@@ -2161,8 +2246,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
- int shrink_slots =
- frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
+ int shrink_slots = frame()->GetTotalFrameSlotCount() -
+ call_descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
@@ -2174,9 +2259,10 @@ void CodeGenerator::AssembleConstructFrame() {
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
+ InitializePoisonForLoadsIfNeeded();
}
- const RegList double_saves = descriptor->CalleeSavedFPRegisters();
+ const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
if (shrink_slots > 0) {
__ Add(sp, sp, -shrink_slots * kPointerSize, r0);
}
@@ -2189,10 +2275,10 @@ void CodeGenerator::AssembleConstructFrame() {
}
// Save callee-saved registers.
- const RegList saves =
- FLAG_enable_embedded_constant_pool
- ? descriptor->CalleeSavedRegisters() & ~kConstantPoolRegister.bit()
- : descriptor->CalleeSavedRegisters();
+ const RegList saves = FLAG_enable_embedded_constant_pool
+ ? call_descriptor->CalleeSavedRegisters() &
+ ~kConstantPoolRegister.bit()
+ : call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
__ MultiPush(saves);
// register save area does not include the fp or constant pool pointer.
@@ -2200,26 +2286,26 @@ void CodeGenerator::AssembleConstructFrame() {
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+ int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
// Restore registers.
- const RegList saves =
- FLAG_enable_embedded_constant_pool
- ? descriptor->CalleeSavedRegisters() & ~kConstantPoolRegister.bit()
- : descriptor->CalleeSavedRegisters();
+ const RegList saves = FLAG_enable_embedded_constant_pool
+ ? call_descriptor->CalleeSavedRegisters() &
+ ~kConstantPoolRegister.bit()
+ : call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
__ MultiPop(saves);
}
// Restore double registers.
- const RegList double_saves = descriptor->CalleeSavedFPRegisters();
+ const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
if (double_saves != 0) {
__ MultiPopDoubles(double_saves);
}
PPCOperandConverter g(this, nullptr);
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now unless they have a variable
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index fced5565df..70a6c9ee69 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -194,7 +194,7 @@ void InstructionSelector::VisitLoad(Node* node) {
PPCOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* offset = node->InputAt(1);
- ArchOpcode opcode = kArchNop;
+ InstructionCode opcode = kArchNop;
ImmediateMode mode = kInt16Imm;
switch (load_rep.representation()) {
case MachineRepresentation::kFloat32:
@@ -234,6 +234,12 @@ void InstructionSelector::VisitLoad(Node* node) {
UNREACHABLE();
return;
}
+
+ if (node->opcode() == IrOpcode::kPoisonedLoad &&
+ load_poisoning_ == LoadPoisoning::kDoPoison) {
+ opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ }
+
if (g.CanBeImmediate(offset, mode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(offset));
@@ -246,6 +252,8 @@ void InstructionSelector::VisitLoad(Node* node) {
}
}
+void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+
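For poisoned loads the selector reuses the ordinary VisitLoad path and only tags the emitted instruction by OR-ing kMemoryAccessPoisoned into the opcode's MiscField; EmitWordLoadPoisoningIfNeeded in the code generator (earlier in this diff) decodes that field and ANDs the loaded value with the poison register. A simplified, self-contained model of that encode/decode round trip — the 9-bit opcode / flag-bit split below is an assumed layout for illustration, not V8's real InstructionCode bit fields:

  #include <cassert>
  #include <cstdint>

  // Hypothetical packing: low 9 bits carry the arch opcode, the next bit is
  // a "memory access poisoned" flag (V8 uses BitField-based MiscField).
  constexpr uint32_t kOpcodeBits = 9;
  constexpr uint32_t kPoisonedFlag = 1u << kOpcodeBits;

  uint32_t TagAsPoisoned(uint32_t opcode) { return opcode | kPoisonedFlag; }
  bool IsPoisonedAccess(uint32_t code) { return (code & kPoisonedFlag) != 0; }

  int main() {
    const uint32_t kFakeLoadOpcode = 0x42;  // stand-in for a real load opcode
    uint32_t code = TagAsPoisoned(kFakeLoadOpcode);
    assert(IsPoisonedAccess(code));
    assert((code & (kPoisonedFlag - 1)) == kFakeLoadOpcode);  // opcode intact
  }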
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -1118,6 +1126,16 @@ void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
VisitRR(this, kPPC_DoubleToUint32, node);
}
+void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
+ // TODO(mbrandy): inspect input to see if nop is appropriate.
+ VisitRR(this, kPPC_ExtendSignWord8, node);
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
+ // TODO(mbrandy): inspect input to see if nop is appropriate.
+ VisitRR(this, kPPC_ExtendSignWord16, node);
+}
+
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
VisitTryTruncateDouble(this, kPPC_DoubleToInt64, node);
@@ -1144,6 +1162,20 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
VisitRR(this, kPPC_ExtendSignWord32, node);
}
+void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
+ // TODO(mbrandy): inspect input to see if nop is appropriate.
+ VisitRR(this, kPPC_ExtendSignWord8, node);
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
+ // TODO(mbrandy): inspect input to see if nop is appropriate.
+ VisitRR(this, kPPC_ExtendSignWord16, node);
+}
+
+void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
+ // TODO(mbrandy): inspect input to see if nop is appropriate.
+ VisitRR(this, kPPC_ExtendSignWord32, node);
+}
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
// TODO(mbrandy): inspect input to see if nop is appropriate.
@@ -1533,14 +1565,13 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
g.UseRegister(right), cont);
}
+} // namespace
// Shared routine for word comparisons against zero.
-void VisitWordCompareZero(InstructionSelector* selector, Node* user,
- Node* value, InstructionCode opcode,
- FlagsContinuation* cont) {
+void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
+ FlagsContinuation* cont) {
// Try to combine with comparisons against 0 by simply inverting the branch.
- while (value->opcode() == IrOpcode::kWord32Equal &&
- selector->CanCover(user, value)) {
+ while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
Int32BinopMatcher m(value);
if (!m.right().Is(0)) break;
@@ -1549,58 +1580,58 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
cont->Negate();
}
- if (selector->CanCover(user, value)) {
+ if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kWord32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kInt32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kUint32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kUint32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
#if V8_TARGET_ARCH_PPC64
case IrOpcode::kWord64Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kInt64LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kInt64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kUint64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kUint64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
#endif
case IrOpcode::kFloat32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat64Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
@@ -1612,28 +1643,27 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (result == nullptr || selector->IsDefined(result)) {
+ if (result == nullptr || IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop<Int32BinopMatcher>(
- selector, node, kPPC_AddWithOverflow32, kInt16Imm, cont);
+ this, node, kPPC_AddWithOverflow32, kInt16Imm, cont);
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int32BinopMatcher>(selector, node,
- kPPC_SubWithOverflow32,
- kInt16Imm_Negate, cont);
+ return VisitBinop<Int32BinopMatcher>(
+ this, node, kPPC_SubWithOverflow32, kInt16Imm_Negate, cont);
case IrOpcode::kInt32MulWithOverflow:
cont->OverwriteAndNegateIfEqual(kNotEqual);
- return EmitInt32MulWithOverflow(selector, node, cont);
+ return EmitInt32MulWithOverflow(this, node, cont);
#if V8_TARGET_ARCH_PPC64
case IrOpcode::kInt64AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int64BinopMatcher>(selector, node, kPPC_Add64,
+ return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add64,
kInt16Imm, cont);
case IrOpcode::kInt64SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int64BinopMatcher>(selector, node, kPPC_Sub,
+ return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Sub,
kInt16Imm_Negate, cont);
#endif
default:
@@ -1643,10 +1673,10 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
}
break;
case IrOpcode::kInt32Sub:
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kWord32And:
// TODO(mbandy): opportunity for rlwinm?
- return VisitWordCompare(selector, value, kPPC_Tst32, cont, true,
+ return VisitWordCompare(this, value, kPPC_Tst32, cont, true,
kInt16Imm_Unsigned);
// TODO(mbrandy): Handle?
// case IrOpcode::kInt32Add:
@@ -1658,10 +1688,10 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// case IrOpcode::kWord32Ror:
#if V8_TARGET_ARCH_PPC64
case IrOpcode::kInt64Sub:
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kWord64And:
// TODO(mbandy): opportunity for rldic?
- return VisitWordCompare(selector, value, kPPC_Tst64, cont, true,
+ return VisitWordCompare(this, value, kPPC_Tst64, cont, true,
kInt16Imm_Unsigned);
// TODO(mbrandy): Handle?
// case IrOpcode::kInt64Add:
@@ -1678,84 +1708,36 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
}
// Branch could not be combined with a compare, emit compare against 0.
- PPCOperandGenerator g(selector);
- VisitCompare(selector, opcode, g.UseRegister(value), g.TempImmediate(0),
+ PPCOperandGenerator g(this);
+ VisitCompare(this, kPPC_Cmp32, g.UseRegister(value), g.TempImmediate(0),
cont);
}
-
-void VisitWord32CompareZero(InstructionSelector* selector, Node* user,
- Node* value, FlagsContinuation* cont) {
- VisitWordCompareZero(selector, user, value, kPPC_Cmp32, cont);
-}
-
-
-#if V8_TARGET_ARCH_PPC64
-void VisitWord64CompareZero(InstructionSelector* selector, Node* user,
- Node* value, FlagsContinuation* cont) {
- VisitWordCompareZero(selector, user, value, kPPC_Cmp64, cont);
-}
-#endif
-
-} // namespace
-
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
- BasicBlock* fbranch) {
- FlagsContinuation cont(kNotEqual, tbranch, fbranch);
- VisitWord32CompareZero(this, branch, branch->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
- VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapUnless(Node* node,
- Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
- VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
-}
-
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
PPCOperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
- static const size_t kMaxTableSwitchValueRange = 2 << 16;
- size_t table_space_cost = 4 + sw.value_range;
- size_t table_time_cost = 3;
- size_t lookup_space_cost = 3 + 2 * sw.case_count;
- size_t lookup_time_cost = sw.case_count;
- if (sw.case_count > 0 &&
- table_space_cost + 3 * table_time_cost <=
- lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min() &&
- sw.value_range <= kMaxTableSwitchValueRange) {
- InstructionOperand index_operand = value_operand;
- if (sw.min_value) {
- index_operand = g.TempRegister();
- Emit(kPPC_Sub, index_operand, value_operand,
- g.TempImmediate(sw.min_value));
+ if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
+ size_t table_space_cost = 4 + sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 3 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value) {
+ index_operand = g.TempRegister();
+ Emit(kPPC_Sub, index_operand, value_operand,
+ g.TempImmediate(sw.min_value));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
}
- // Generate a table lookup.
- return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
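The jump-table heuristic above weighs space cost plus three times time cost for both strategies. A quick worked example with made-up numbers, case_count = 10 and value_range = 20:

  table:  space = 4 + 20     = 24, time = 3   ->  24 + 3 * 3  = 33
  lookup: space = 3 + 2 * 10 = 23, time = 10  ->  23 + 3 * 10 = 53

Since 33 <= 53, and provided the min_value and value_range guards also pass, a table switch is emitted; with only a couple of sparse cases the inequality flips and the conditional-jump sequence wins instead.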
@@ -1765,10 +1747,6 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
- Int32BinopMatcher m(node);
- if (m.right().Is(0)) {
- return VisitWord32CompareZero(this, m.node(), m.left().node(), &cont);
- }
VisitWord32Compare(this, node, &cont);
}
@@ -1802,10 +1780,6 @@ void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
- Int64BinopMatcher m(node);
- if (m.right().Is(0)) {
- return VisitWord64CompareZero(this, m.node(), m.left().node(), &cont);
- }
VisitWord64Compare(this, node, &cont);
}
@@ -1883,16 +1857,15 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
VisitFloat64Compare(this, node, &cont);
}
-
void InstructionSelector::EmitPrepareArguments(
- ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
Node* node) {
PPCOperandGenerator g(this);
// Prepare for C function call.
- if (descriptor->IsCFunctionCall()) {
- Emit(kArchPrepareCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
+ if (call_descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
+ call_descriptor->ParameterCount())),
0, nullptr, 0, nullptr);
// Poke any stack arguments.
@@ -1962,7 +1935,7 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(left), g.UseRegister(right));
}
-void InstructionSelector::VisitAtomicLoad(Node* node) {
+void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
PPCOperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -1970,13 +1943,15 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
ArchOpcode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ opcode =
+ load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
+ : kWord32AtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicLoadWord32;
+ opcode = kWord32AtomicLoadWord32;
break;
default:
UNREACHABLE();
@@ -1986,7 +1961,7 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
}
-void InstructionSelector::VisitAtomicStore(Node* node) {
+void InstructionSelector::VisitWord32AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
PPCOperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -1995,13 +1970,13 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kAtomicStoreWord8;
+ opcode = kWord32AtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
- opcode = kAtomicStoreWord16;
+ opcode = kWord32AtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicStoreWord32;
+ opcode = kWord32AtomicStoreWord32;
break;
default:
UNREACHABLE();
@@ -2017,7 +1992,7 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
0, nullptr, input_count, inputs);
}
-void InstructionSelector::VisitAtomicExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
PPCOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2025,15 +2000,15 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpRepresentationOf(node->op());
if (type == MachineType::Int8()) {
- opcode = kAtomicExchangeInt8;
+ opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicExchangeUint8;
+ opcode = kWord32AtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicExchangeInt16;
+ opcode = kWord32AtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicExchangeUint16;
+ opcode = kWord32AtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicExchangeWord32;
+ opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -2051,19 +2026,19 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
Emit(code, 1, outputs, input_count, inputs);
}
-void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitAtomicAdd(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitWord32AtomicAdd(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitAtomicSub(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitWord32AtomicSub(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitAtomicAnd(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitWord32AtomicAnd(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitAtomicOr(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitWord32AtomicOr(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitAtomicXor(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitWord32AtomicXor(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
@@ -2241,9 +2216,9 @@ void InstructionSelector::VisitF32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
- const CallDescriptor* descriptor,
- Node* node) {
+void InstructionSelector::EmitPrepareResults(
+ ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
+ Node* node) {
// TODO(John): Port.
}
@@ -2257,6 +2232,12 @@ void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Max(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitS128Select(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Abs(Node* node) { UNIMPLEMENTED(); }
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
@@ -2279,6 +2260,9 @@ InstructionSelector::AlignmentRequirements() {
FullUnalignedAccessSupport();
}
+// static
+bool InstructionSelector::SupportsSpeculationPoisoning() { return true; }
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index ed67c06cc7..778752e50f 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -212,28 +212,29 @@ void RawMachineAssembler::Comment(const char* msg) {
AddNode(machine()->Comment(msg));
}
-Node* RawMachineAssembler::CallN(CallDescriptor* desc, int input_count,
- Node* const* inputs) {
- DCHECK(!desc->NeedsFrameState());
+Node* RawMachineAssembler::CallN(CallDescriptor* call_descriptor,
+ int input_count, Node* const* inputs) {
+ DCHECK(!call_descriptor->NeedsFrameState());
// +1 is for target.
- DCHECK_EQ(input_count, desc->ParameterCount() + 1);
- return AddNode(common()->Call(desc), input_count, inputs);
+ DCHECK_EQ(input_count, call_descriptor->ParameterCount() + 1);
+ return AddNode(common()->Call(call_descriptor), input_count, inputs);
}
-Node* RawMachineAssembler::CallNWithFrameState(CallDescriptor* desc,
+Node* RawMachineAssembler::CallNWithFrameState(CallDescriptor* call_descriptor,
int input_count,
Node* const* inputs) {
- DCHECK(desc->NeedsFrameState());
+ DCHECK(call_descriptor->NeedsFrameState());
// +2 is for target and frame state.
- DCHECK_EQ(input_count, desc->ParameterCount() + 2);
- return AddNode(common()->Call(desc), input_count, inputs);
+ DCHECK_EQ(input_count, call_descriptor->ParameterCount() + 2);
+ return AddNode(common()->Call(call_descriptor), input_count, inputs);
}
-Node* RawMachineAssembler::TailCallN(CallDescriptor* desc, int input_count,
- Node* const* inputs) {
+Node* RawMachineAssembler::TailCallN(CallDescriptor* call_descriptor,
+ int input_count, Node* const* inputs) {
// +1 is for target.
- DCHECK_EQ(input_count, desc->ParameterCount() + 1);
- Node* tail_call = MakeNode(common()->TailCall(desc), input_count, inputs);
+ DCHECK_EQ(input_count, call_descriptor->ParameterCount() + 1);
+ Node* tail_call =
+ MakeNode(common()->TailCall(call_descriptor), input_count, inputs);
schedule()->AddTailCall(CurrentBlock(), tail_call);
current_block_ = nullptr;
return tail_call;
@@ -243,10 +244,10 @@ Node* RawMachineAssembler::CallCFunction0(MachineType return_type,
Node* function) {
MachineSignature::Builder builder(zone(), 1, 0);
builder.AddReturn(return_type);
- const CallDescriptor* descriptor =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- return AddNode(common()->Call(descriptor), function);
+ return AddNode(common()->Call(call_descriptor), function);
}
@@ -256,10 +257,10 @@ Node* RawMachineAssembler::CallCFunction1(MachineType return_type,
MachineSignature::Builder builder(zone(), 1, 1);
builder.AddReturn(return_type);
builder.AddParam(arg0_type);
- const CallDescriptor* descriptor =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- return AddNode(common()->Call(descriptor), function, arg0);
+ return AddNode(common()->Call(call_descriptor), function, arg0);
}
Node* RawMachineAssembler::CallCFunction1WithCallerSavedRegisters(
@@ -268,13 +269,13 @@ Node* RawMachineAssembler::CallCFunction1WithCallerSavedRegisters(
MachineSignature::Builder builder(zone(), 1, 1);
builder.AddReturn(return_type);
builder.AddParam(arg0_type);
- CallDescriptor* descriptor =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- descriptor->set_save_fp_mode(mode);
+ call_descriptor->set_save_fp_mode(mode);
- return AddNode(common()->CallWithCallerSavedRegisters(descriptor), function,
- arg0);
+ return AddNode(common()->CallWithCallerSavedRegisters(call_descriptor),
+ function, arg0);
}
Node* RawMachineAssembler::CallCFunction2(MachineType return_type,
@@ -285,10 +286,10 @@ Node* RawMachineAssembler::CallCFunction2(MachineType return_type,
builder.AddReturn(return_type);
builder.AddParam(arg0_type);
builder.AddParam(arg1_type);
- const CallDescriptor* descriptor =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- return AddNode(common()->Call(descriptor), function, arg0, arg1);
+ return AddNode(common()->Call(call_descriptor), function, arg0, arg1);
}
Node* RawMachineAssembler::CallCFunction3(MachineType return_type,
@@ -301,10 +302,10 @@ Node* RawMachineAssembler::CallCFunction3(MachineType return_type,
builder.AddParam(arg0_type);
builder.AddParam(arg1_type);
builder.AddParam(arg2_type);
- const CallDescriptor* descriptor =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- return AddNode(common()->Call(descriptor), function, arg0, arg1, arg2);
+ return AddNode(common()->Call(call_descriptor), function, arg0, arg1, arg2);
}
Node* RawMachineAssembler::CallCFunction3WithCallerSavedRegisters(
@@ -316,13 +317,13 @@ Node* RawMachineAssembler::CallCFunction3WithCallerSavedRegisters(
builder.AddParam(arg0_type);
builder.AddParam(arg1_type);
builder.AddParam(arg2_type);
- CallDescriptor* descriptor =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- descriptor->set_save_fp_mode(mode);
+ call_descriptor->set_save_fp_mode(mode);
- return AddNode(common()->CallWithCallerSavedRegisters(descriptor), function,
- arg0, arg1, arg2);
+ return AddNode(common()->CallWithCallerSavedRegisters(call_descriptor),
+ function, arg0, arg1, arg2);
}
Node* RawMachineAssembler::CallCFunction4(
@@ -335,10 +336,11 @@ Node* RawMachineAssembler::CallCFunction4(
builder.AddParam(arg1_type);
builder.AddParam(arg2_type);
builder.AddParam(arg3_type);
- const CallDescriptor* descriptor =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- return AddNode(common()->Call(descriptor), function, arg0, arg1, arg2, arg3);
+ return AddNode(common()->Call(call_descriptor), function, arg0, arg1, arg2,
+ arg3);
}
Node* RawMachineAssembler::CallCFunction5(
@@ -353,11 +355,11 @@ Node* RawMachineAssembler::CallCFunction5(
builder.AddParam(arg2_type);
builder.AddParam(arg3_type);
builder.AddParam(arg4_type);
- const CallDescriptor* descriptor =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- return AddNode(common()->Call(descriptor), function, arg0, arg1, arg2, arg3,
- arg4);
+ return AddNode(common()->Call(call_descriptor), function, arg0, arg1, arg2,
+ arg3, arg4);
}
Node* RawMachineAssembler::CallCFunction6(
@@ -373,11 +375,11 @@ Node* RawMachineAssembler::CallCFunction6(
builder.AddParam(arg3_type);
builder.AddParam(arg4_type);
builder.AddParam(arg5_type);
- const CallDescriptor* descriptor =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- return AddNode(common()->Call(descriptor), function, arg0, arg1, arg2, arg3,
- arg4, arg5);
+ return AddNode(common()->Call(call_descriptor), function, arg0, arg1, arg2,
+ arg3, arg4, arg5);
}
Node* RawMachineAssembler::CallCFunction8(
@@ -397,9 +399,9 @@ Node* RawMachineAssembler::CallCFunction8(
builder.AddParam(arg6_type);
builder.AddParam(arg7_type);
Node* args[] = {function, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
- const CallDescriptor* descriptor =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- return AddNode(common()->Call(descriptor), arraysize(args), args);
+ return AddNode(common()->Call(call_descriptor), arraysize(args), args);
}
Node* RawMachineAssembler::CallCFunction9(
@@ -421,9 +423,9 @@ Node* RawMachineAssembler::CallCFunction9(
builder.AddParam(arg8_type);
Node* args[] = {function, arg0, arg1, arg2, arg3,
arg4, arg5, arg6, arg7, arg8};
- const CallDescriptor* descriptor =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- return AddNode(common()->Call(descriptor), arraysize(args), args);
+ return AddNode(common()->Call(call_descriptor), arraysize(args), args);
}
BasicBlock* RawMachineAssembler::Use(RawMachineLabel* label) {
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 1cc56b3379..3d689a089c 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -162,15 +162,15 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
// Atomic memory operations.
Node* AtomicLoad(MachineType type, Node* base, Node* index) {
- return AddNode(machine()->AtomicLoad(type), base, index);
+ return AddNode(machine()->Word32AtomicLoad(type), base, index);
}
Node* AtomicStore(MachineRepresentation rep, Node* base, Node* index,
Node* value) {
- return AddNode(machine()->AtomicStore(rep), base, index, value);
+ return AddNode(machine()->Word32AtomicStore(rep), base, index, value);
}
#define ATOMIC_FUNCTION(name) \
Node* Atomic##name(MachineType rep, Node* base, Node* index, Node* value) { \
- return AddNode(machine()->Atomic##name(rep), base, index, value); \
+ return AddNode(machine()->Word32Atomic##name(rep), base, index, value); \
}
ATOMIC_FUNCTION(Exchange);
ATOMIC_FUNCTION(Add);
@@ -182,7 +182,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* AtomicCompareExchange(MachineType rep, Node* base, Node* index,
Node* old_value, Node* new_value) {
- return AddNode(machine()->AtomicCompareExchange(rep), base, index,
+ return AddNode(machine()->Word32AtomicCompareExchange(rep), base, index,
old_value, new_value);
}
@@ -744,19 +744,24 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* StringConstant(const char* string) {
return HeapConstant(isolate()->factory()->InternalizeUtf8String(string));
}
+ Node* SpeculationPoison() {
+ return AddNode(machine()->SpeculationPoison(), graph()->start());
+ }
// Call a given call descriptor and the given arguments.
// The call target is passed as part of the {inputs} array.
- Node* CallN(CallDescriptor* desc, int input_count, Node* const* inputs);
+ Node* CallN(CallDescriptor* call_descriptor, int input_count,
+ Node* const* inputs);
// Call a given call descriptor and the given arguments and frame-state.
// The call target and frame state are passed as part of the {inputs} array.
- Node* CallNWithFrameState(CallDescriptor* desc, int input_count,
+ Node* CallNWithFrameState(CallDescriptor* call_descriptor, int input_count,
Node* const* inputs);
// Tail call a given call descriptor and the given arguments.
// The call target is passed as part of the {inputs} array.
- Node* TailCallN(CallDescriptor* desc, int input_count, Node* const* inputs);
+ Node* TailCallN(CallDescriptor* call_descriptor, int input_count,
+ Node* const* inputs);
// Call to a C function with zero arguments.
Node* CallCFunction0(MachineType return_type, Node* function);
@@ -903,6 +908,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
CommonOperatorBuilder common_;
CallDescriptor* call_descriptor_;
NodeVector parameters_;
+ Node* speculation_poison_;
BasicBlock* current_block_;
DISALLOW_COPY_AND_ASSIGN(RawMachineAssembler);
diff --git a/deps/v8/src/compiler/register-allocator-verifier.h b/deps/v8/src/compiler/register-allocator-verifier.h
index a961f0012f..7e22ab22ad 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.h
+++ b/deps/v8/src/compiler/register-allocator-verifier.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_REGISTER_ALLOCATOR_VERIFIER_H_
-#define V8_REGISTER_ALLOCATOR_VERIFIER_H_
+#ifndef V8_COMPILER_REGISTER_ALLOCATOR_VERIFIER_H_
+#define V8_COMPILER_REGISTER_ALLOCATOR_VERIFIER_H_
#include "src/compiler/instruction.h"
#include "src/zone/zone-containers.h"
@@ -270,4 +270,4 @@ class RegisterAllocatorVerifier final : public ZoneObject {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_COMPILER_REGISTER_ALLOCATOR_VERIFIER_H_
diff --git a/deps/v8/src/compiler/register-allocator.cc b/deps/v8/src/compiler/register-allocator.cc
index 7f65695ee2..43eb408f1e 100644
--- a/deps/v8/src/compiler/register-allocator.cc
+++ b/deps/v8/src/compiler/register-allocator.cc
@@ -713,7 +713,7 @@ void LiveRange::ConvertUsesToOperand(const InstructionOperand& op,
break;
case UsePositionType::kRequiresRegister:
DCHECK(op.IsRegister() || op.IsFPRegister());
- // Fall through.
+ V8_FALLTHROUGH;
case UsePositionType::kAny:
InstructionOperand::ReplaceWith(pos->operand(), &op);
break;
@@ -1897,10 +1897,10 @@ int LiveRangeBuilder::FixedFPLiveRangeID(int index, MachineRepresentation rep) {
switch (rep) {
case MachineRepresentation::kSimd128:
result -= config()->num_float_registers();
- // Fall through.
+ V8_FALLTHROUGH;
case MachineRepresentation::kFloat32:
result -= config()->num_double_registers();
- // Fall through.
+ V8_FALLTHROUGH;
case MachineRepresentation::kFloat64:
result -= config()->num_general_registers();
break;
diff --git a/deps/v8/src/compiler/register-allocator.h b/deps/v8/src/compiler/register-allocator.h
index 63e94fbdc8..4f6002874c 100644
--- a/deps/v8/src/compiler/register-allocator.h
+++ b/deps/v8/src/compiler/register-allocator.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_REGISTER_ALLOCATOR_H_
-#define V8_REGISTER_ALLOCATOR_H_
+#ifndef V8_COMPILER_REGISTER_ALLOCATOR_H_
+#define V8_COMPILER_REGISTER_ALLOCATOR_H_
#include "src/base/bits.h"
#include "src/base/compiler-specific.h"
@@ -1213,4 +1213,4 @@ class LiveRangeConnector final : public ZoneObject {
} // namespace internal
} // namespace v8
-#endif // V8_REGISTER_ALLOCATOR_H_
+#endif // V8_COMPILER_REGISTER_ALLOCATOR_H_
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index f8a5a9c504..739fb421ab 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -588,10 +588,11 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
} else if (use_info.type_check() == TypeCheckKind::kNumber ||
(use_info.type_check() == TypeCheckKind::kNumberOrOddball &&
!output_type->Maybe(Type::BooleanOrNullOrNumber()))) {
- op = simplified()->CheckedTaggedToFloat64(CheckTaggedInputMode::kNumber);
+ op = simplified()->CheckedTaggedToFloat64(CheckTaggedInputMode::kNumber,
+ use_info.feedback());
} else if (use_info.type_check() == TypeCheckKind::kNumberOrOddball) {
op = simplified()->CheckedTaggedToFloat64(
- CheckTaggedInputMode::kNumberOrOddball);
+ CheckTaggedInputMode::kNumberOrOddball, use_info.feedback());
}
} else if (output_rep == MachineRepresentation::kFloat32) {
op = machine()->ChangeFloat32ToFloat64();
@@ -767,6 +768,7 @@ Node* RepresentationChanger::GetBitRepresentationFor(
} else if (m.Is(factory()->true_value())) {
return jsgraph()->Int32Constant(1);
}
+ break;
}
default:
break;
@@ -1062,11 +1064,11 @@ Node* RepresentationChanger::TypeError(Node* node,
std::ostringstream use_str;
use_str << use;
- V8_Fatal(__FILE__, __LINE__,
- "RepresentationChangerError: node #%d:%s of "
- "%s cannot be changed to %s",
- node->id(), node->op()->mnemonic(), out_str.str().c_str(),
- use_str.str().c_str());
+ FATAL(
+ "RepresentationChangerError: node #%d:%s of "
+ "%s cannot be changed to %s",
+ node->id(), node->op()->mnemonic(), out_str.str().c_str(),
+ use_str.str().c_str());
}
return node;
}
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index b23a3dac5b..571f13cd7d 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -203,25 +203,29 @@ class UseInfo {
Truncation::Any(identify_zeros), TypeCheckKind::kSignedSmall,
feedback);
}
- static UseInfo CheckedSigned32AsWord32(IdentifyZeros identify_zeros) {
+ static UseInfo CheckedSigned32AsWord32(IdentifyZeros identify_zeros,
+ const VectorSlotPair& feedback) {
return UseInfo(MachineRepresentation::kWord32,
- Truncation::Any(identify_zeros), TypeCheckKind::kSigned32);
+ Truncation::Any(identify_zeros), TypeCheckKind::kSigned32,
+ feedback);
}
- static UseInfo CheckedNumberAsFloat64() {
+ static UseInfo CheckedNumberAsFloat64(const VectorSlotPair& feedback) {
return UseInfo(MachineRepresentation::kFloat64, Truncation::Any(),
- TypeCheckKind::kNumber);
+ TypeCheckKind::kNumber, feedback);
}
- static UseInfo CheckedNumberAsWord32() {
+ static UseInfo CheckedNumberAsWord32(const VectorSlotPair& feedback) {
return UseInfo(MachineRepresentation::kWord32, Truncation::Word32(),
- TypeCheckKind::kNumber);
+ TypeCheckKind::kNumber, feedback);
}
- static UseInfo CheckedNumberOrOddballAsFloat64() {
+ static UseInfo CheckedNumberOrOddballAsFloat64(
+ const VectorSlotPair& feedback) {
return UseInfo(MachineRepresentation::kFloat64, Truncation::Any(),
- TypeCheckKind::kNumberOrOddball);
+ TypeCheckKind::kNumberOrOddball, feedback);
}
- static UseInfo CheckedNumberOrOddballAsWord32() {
+ static UseInfo CheckedNumberOrOddballAsWord32(
+ const VectorSlotPair& feedback) {
return UseInfo(MachineRepresentation::kWord32, Truncation::Word32(),
- TypeCheckKind::kNumberOrOddball);
+ TypeCheckKind::kNumberOrOddball, feedback);
}
// Undetermined representation.
diff --git a/deps/v8/src/compiler/s390/code-generator-s390.cc b/deps/v8/src/compiler/s390/code-generator-s390.cc
index c0d3146be1..8327528c71 100644
--- a/deps/v8/src/compiler/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/s390/code-generator-s390.cc
@@ -260,28 +260,28 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
// unsigned number never less than 0
if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
return CC_NOP;
- // fall through
+ V8_FALLTHROUGH;
case kSignedLessThan:
return lt;
case kUnsignedGreaterThanOrEqual:
// unsigned number always greater than or equal 0
if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
return CC_ALWAYS;
- // fall through
+ V8_FALLTHROUGH;
case kSignedGreaterThanOrEqual:
return ge;
case kUnsignedLessThanOrEqual:
// unsigned number never less than 0
if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
return CC_EQ;
- // fall through
+ V8_FALLTHROUGH;
case kSignedLessThanOrEqual:
return le;
case kUnsignedGreaterThan:
// unsigned number always greater than or equal 0
if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
return ne;
- // fall through
+ V8_FALLTHROUGH;
case kSignedGreaterThan:
return gt;
case kOverflow:
@@ -984,6 +984,16 @@ void AdjustStackPointerForTailCall(
}
}
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
+ S390OperandConverter& i) {
+ const MemoryAccessMode access_mode =
+ static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode()));
+ if (access_mode == kMemoryAccessPoisoned) {
+ Register value = i.OutputRegister();
+ codegen->tasm()->AndP(value, kSpeculationPoisonRegister);
+ }
+}
+
} // namespace
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
@@ -1028,21 +1038,33 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
first_unused_stack_slot);
}
+// Check that {kJavaScriptCallCodeStartRegister} is correct.
+void CodeGenerator::AssembleCodeStartRegisterCheck() {
+ Register scratch = r1;
+ int pc_offset = __ pc_offset();
+ __ larl(scratch, Operand(-pc_offset/2));
+ __ CmpP(scratch, kJavaScriptCallCodeStartRegister);
+ __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
+}
+
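The check works because larl computes a PC-relative address whose immediate is counted in 2-byte halfwords (s390 instructions are halfword aligned, so pc_offset is always even): adding -pc_offset/2 halfwords to the current PC walks back exactly pc_offset bytes, i.e. to the first instruction of the code object, which is what kJavaScriptCallCodeStartRegister must hold if the call went through the expected code start. As arithmetic, a sketch of the invariant under that halfword-scaling assumption:

  current_pc = code_start + pc_offset            // bytes assembled so far
  scratch    = current_pc + 2 * (-pc_offset / 2) // larl immediate in halfwords
             = code_start
  // so CmpP(scratch, kJavaScriptCallCodeStartRegister) must compare equal.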
// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
-// 1. load the address of the current instruction;
-// 2. read from memory the word that contains that bit, which can be found in
+// 1. read from memory the word that contains that bit, which can be found in
// the flags in the referenced {CodeDataContainer} object;
-// 3. test kMarkedForDeoptimizationBit in those flags; and
-// 4. if it is not zero then it jumps to the builtin.
+// 2. test kMarkedForDeoptimizationBit in those flags; and
+// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
- Label current;
- __ larl(r1, &current);
- int pc_offset = __ pc_offset();
- __ bind(&current);
- int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc_offset);
- __ LoadP(ip, MemOperand(r1, offset));
+ if (FLAG_debug_code) {
+ // Check that {kJavaScriptCallCodeStartRegister} is correct.
+ int pc_offset = __ pc_offset();
+ __ larl(ip, Operand(-pc_offset/2));
+ __ CmpP(ip, kJavaScriptCallCodeStartRegister);
+ __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
+ }
+
+ int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+ __ LoadP(ip, MemOperand(kJavaScriptCallCodeStartRegister, offset));
__ LoadW(ip,
FieldMemOperand(ip, CodeDataContainer::kKindSpecificFlagsOffset));
__ TestBit(ip, Code::kMarkedForDeoptimizationBit);
@@ -1051,6 +1073,37 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ Jump(code, RelocInfo::CODE_TARGET, ne);
}
+void CodeGenerator::GenerateSpeculationPoison() {
+ Register scratch = r1;
+
+ Label current_pc;
+ __ larl(scratch, &current_pc);
+
+ __ bind(&current_pc);
+ __ SubP(scratch, Operand(__ pc_offset()));
+
+ // Calculate a mask which has all bits set in the normal case, but has all
+ // bits cleared if we are speculatively executing the wrong PC.
+ // difference = (current - expected) | (expected - current)
+ // poison = ~(difference >> (kBitsPerPointer - 1))
+ __ LoadRR(kSpeculationPoisonRegister, scratch);
+ __ SubP(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kJavaScriptCallCodeStartRegister);
+ __ SubP(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
+ scratch);
+ __ OrP(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kJavaScriptCallCodeStartRegister);
+ __ ShiftRightArithP(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ Operand(kBitsPerPointer - 1));
+ __ NotP(kSpeculationPoisonRegister, kSpeculationPoisonRegister);
+}
+
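The mask derivation above is a branch-free compare: for any nonzero difference, (a - b) | (b - a) has its top bit set, the arithmetic shift by kBitsPerPointer - 1 smears that bit across the whole word, and the final NOT turns it into all zeros; only when the expected and actual code-start addresses match does the expression collapse to all ones. A standalone C++ check of the bit trick (pointer width fixed at 64 bits for illustration):

  #include <cassert>
  #include <cstdint>

  uint64_t SpeculationPoisonMask(uint64_t current_code_start,
                                 uint64_t expected_code_start) {
    // difference = (current - expected) | (expected - current)
    uint64_t difference = (current_code_start - expected_code_start) |
                          (expected_code_start - current_code_start);
    // poison = ~(difference >> (kBitsPerPointer - 1)), arithmetic shift
    uint64_t smeared =
        static_cast<uint64_t>(static_cast<int64_t>(difference) >> 63);
    return ~smeared;
  }

  int main() {
    // Matching code start: mask is all ones, loads pass through untouched.
    assert(SpeculationPoisonMask(0x1000, 0x1000) == ~uint64_t{0});
    // Any mismatch (a misspeculated call target): mask is all zeros.
    assert(SpeculationPoisonMask(0x1000, 0x2000) == 0);
    assert(SpeculationPoisonMask(0x2000, 0x1000) == 0);
  }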
+void CodeGenerator::AssembleRegisterArgumentPoisoning() {
+ __ AndP(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
+ __ AndP(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
+ __ AndP(sp, sp, kSpeculationPoisonRegister);
+}
+
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -1155,9 +1208,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ CmpP(cp, kScratchReg);
__ Assert(eq, AbortReason::kWrongFunctionContext);
}
- __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeOffset));
- __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
+ __ LoadP(r4, FieldMemOperand(func, JSFunction::kCodeOffset));
+ __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(r4);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -1925,15 +1979,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kS390_ExtendSignWord8:
+ case kS390_SignExtendWord8ToInt32:
__ lbr(i.OutputRegister(), i.InputRegister(0));
CHECK_AND_ZERO_EXT_OUTPUT(1);
break;
- case kS390_ExtendSignWord16:
+ case kS390_SignExtendWord16ToInt32:
__ lhr(i.OutputRegister(), i.InputRegister(0));
CHECK_AND_ZERO_EXT_OUTPUT(1);
break;
- case kS390_ExtendSignWord32:
+ case kS390_SignExtendWord8ToInt64:
+ __ lgbr(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kS390_SignExtendWord16ToInt64:
+ __ lghr(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kS390_SignExtendWord32ToInt64:
__ lgfr(i.OutputRegister(), i.InputRegister(0));
break;
case kS390_Uint32ToUint64:
@@ -2106,6 +2166,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kS390_LoadWordS8:
ASSEMBLE_LOAD_INTEGER(LoadB);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_BitcastFloat32ToInt32:
ASSEMBLE_UNARY_OP(R_DInstr(MovFloatToInt), R_MInstr(LoadlW), nullInstr);
@@ -2123,27 +2184,35 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif
case kS390_LoadWordU8:
ASSEMBLE_LOAD_INTEGER(LoadlB);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadWordU16:
ASSEMBLE_LOAD_INTEGER(LoadLogicalHalfWordP);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadWordS16:
ASSEMBLE_LOAD_INTEGER(LoadHalfWordP);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadWordU32:
ASSEMBLE_LOAD_INTEGER(LoadlW);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadWordS32:
ASSEMBLE_LOAD_INTEGER(LoadW);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadReverse16:
ASSEMBLE_LOAD_INTEGER(lrvh);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadReverse32:
ASSEMBLE_LOAD_INTEGER(lrv);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadReverse64:
ASSEMBLE_LOAD_INTEGER(lrvg);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadReverse16RR:
__ lrvr(i.OutputRegister(), i.InputRegister(0));
@@ -2157,6 +2226,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kS390_LoadWord64:
ASSEMBLE_LOAD_INTEGER(lg);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadAndTestWord32: {
ASSEMBLE_LOADANDTEST32(ltr, lt_z);
@@ -2204,28 +2274,28 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_Lay:
__ lay(i.OutputRegister(), i.MemoryOperand());
break;
- case kAtomicLoadInt8:
+ case kWord32AtomicLoadInt8:
__ LoadB(i.OutputRegister(), i.MemoryOperand());
break;
- case kAtomicLoadUint8:
+ case kWord32AtomicLoadUint8:
__ LoadlB(i.OutputRegister(), i.MemoryOperand());
break;
- case kAtomicLoadInt16:
+ case kWord32AtomicLoadInt16:
__ LoadHalfWordP(i.OutputRegister(), i.MemoryOperand());
break;
- case kAtomicLoadUint16:
+ case kWord32AtomicLoadUint16:
__ LoadLogicalHalfWordP(i.OutputRegister(), i.MemoryOperand());
break;
- case kAtomicLoadWord32:
+ case kWord32AtomicLoadWord32:
__ LoadlW(i.OutputRegister(), i.MemoryOperand());
break;
- case kAtomicStoreWord8:
+ case kWord32AtomicStoreWord8:
__ StoreByte(i.InputRegister(0), i.MemoryOperand(nullptr, 1));
break;
- case kAtomicStoreWord16:
+ case kWord32AtomicStoreWord16:
__ StoreHalfWord(i.InputRegister(0), i.MemoryOperand(nullptr, 1));
break;
- case kAtomicStoreWord32:
+ case kWord32AtomicStoreWord32:
__ StoreW(i.InputRegister(0), i.MemoryOperand(nullptr, 1));
break;
// 0x aa bb cc dd
@@ -2281,8 +2351,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_EXCHANGE(start, end, shift_amount, -idx * 2); \
}
#endif
- case kAtomicExchangeInt8:
- case kAtomicExchangeUint8: {
+ case kWord32AtomicExchangeInt8:
+ case kWord32AtomicExchangeUint8: {
Register base = i.InputRegister(0);
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
@@ -2313,15 +2383,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_EXCHANGE_BYTE(3);
__ bind(&done);
- if (opcode == kAtomicExchangeInt8) {
+ if (opcode == kWord32AtomicExchangeInt8) {
__ lbr(output, output);
} else {
__ llcr(output, output);
}
break;
}
- case kAtomicExchangeInt16:
- case kAtomicExchangeUint16: {
+ case kWord32AtomicExchangeInt16:
+ case kWord32AtomicExchangeUint16: {
Register base = i.InputRegister(0);
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
@@ -2340,14 +2410,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_EXCHANGE_HALFWORD(1);
__ bind(&done);
- if (opcode == kAtomicExchangeInt8) {
+ if (opcode == kWord32AtomicExchangeInt8) {
__ lhr(output, output);
} else {
__ llhr(output, output);
}
break;
}
- case kAtomicExchangeWord32: {
+ case kWord32AtomicExchangeWord32: {
Register base = i.InputRegister(0);
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
@@ -2389,6 +2459,19 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
}
+void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
+ Instruction* instr) {
+ // TODO(John) Handle float comparisons (kUnordered[Not]Equal).
+ if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
+ return;
+ }
+
+ condition = NegateFlagsCondition(condition);
+ __ XorP(r0, r0);
+ __ LoadOnConditionP(FlagsConditionToCondition(condition, kArchNop),
+ kSpeculationPoisonRegister, r0);
+}
+
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -2436,8 +2519,9 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ isolate()),
0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
- CallDescriptor* descriptor = gen_->linkage()->GetIncomingDescriptor();
- int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
+ int pop_count =
+ static_cast<int>(call_descriptor->StackParameterCount());
__ Drop(pop_count);
__ Ret();
} else {
@@ -2537,8 +2621,8 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
void CodeGenerator::FinishFrame(Frame* frame) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- const RegList double_saves = descriptor->CalleeSavedFPRegisters();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+ const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
// Save callee-saved Double registers.
if (double_saves != 0) {
@@ -2549,7 +2633,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
(kDoubleSize / kPointerSize));
}
// Save callee-saved registers.
- const RegList saves = descriptor->CalleeSavedRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
// register save area does not include the fp or constant pool pointer.
const int num_saves = kNumCalleeSaved - 1;
@@ -2559,15 +2643,15 @@ void CodeGenerator::FinishFrame(Frame* frame) {
}
void CodeGenerator::AssembleConstructFrame() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
__ Push(r14, fp);
__ LoadRR(fp, sp);
- } else if (descriptor->IsJSFunctionCall()) {
+ } else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue(ip);
- if (descriptor->PushArgumentCount()) {
+ if (call_descriptor->PushArgumentCount()) {
__ Push(kJavaScriptCallArgCountRegister);
}
} else {
@@ -2578,8 +2662,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
- int shrink_slots =
- frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
+ int shrink_slots = frame()->GetTotalFrameSlotCount() -
+ call_descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
@@ -2591,9 +2675,10 @@ void CodeGenerator::AssembleConstructFrame() {
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
+ InitializePoisonForLoadsIfNeeded();
}
- const RegList double_saves = descriptor->CalleeSavedFPRegisters();
+ const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
if (shrink_slots > 0) {
__ lay(sp, MemOperand(sp, -shrink_slots * kPointerSize));
}
@@ -2606,7 +2691,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
// Save callee-saved registers.
- const RegList saves = descriptor->CalleeSavedRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
__ MultiPush(saves);
// register save area does not include the fp or constant pool pointer.
@@ -2614,23 +2699,23 @@ void CodeGenerator::AssembleConstructFrame() {
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+ int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
// Restore registers.
- const RegList saves = descriptor->CalleeSavedRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
__ MultiPop(saves);
}
// Restore double registers.
- const RegList double_saves = descriptor->CalleeSavedFPRegisters();
+ const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
if (double_saves != 0) {
__ MultiPopDoubles(double_saves);
}
S390OperandConverter g(this, nullptr);
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now unless they have a variable
diff --git a/deps/v8/src/compiler/s390/instruction-codes-s390.h b/deps/v8/src/compiler/s390/instruction-codes-s390.h
index cb94da2ec7..b5296f63d0 100644
--- a/deps/v8/src/compiler/s390/instruction-codes-s390.h
+++ b/deps/v8/src/compiler/s390/instruction-codes-s390.h
@@ -99,9 +99,11 @@ namespace compiler {
V(S390_PushFrame) \
V(S390_StackClaim) \
V(S390_StoreToStackSlot) \
- V(S390_ExtendSignWord8) \
- V(S390_ExtendSignWord16) \
- V(S390_ExtendSignWord32) \
+ V(S390_SignExtendWord8ToInt32) \
+ V(S390_SignExtendWord16ToInt32) \
+ V(S390_SignExtendWord8ToInt64) \
+ V(S390_SignExtendWord16ToInt64) \
+ V(S390_SignExtendWord32ToInt64) \
V(S390_Uint32ToUint64) \
V(S390_Int64ToInt32) \
V(S390_Int64ToFloat32) \
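
Besides the rename, the opcode list gains separate ...ToInt64 variants, so the destination width is now spelled out in the name instead of being implicit. A portable C++ rendering of what the two destination widths mean for a signed byte (the selector itself emits the corresponding s390 sign-extension instructions):

#include <cstdint>
#include <cstdio>

int main() {
  int8_t byte = -5;
  // SignExtendWord8ToInt32: the sign bit of the byte fills bits 8..31.
  int32_t to_int32 = static_cast<int32_t>(byte);
  // SignExtendWord8ToInt64: the sign bit fills bits 8..63.
  int64_t to_int64 = static_cast<int64_t>(byte);
  std::printf("%08x %016llx\n", static_cast<uint32_t>(to_int32),
              static_cast<unsigned long long>(to_int64));
  // prints fffffffb fffffffffffffffb
}
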
diff --git a/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc b/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
index 1850830f6e..fd388a219a 100644
--- a/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
@@ -96,9 +96,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_CmpDouble:
case kS390_Tst32:
case kS390_Tst64:
- case kS390_ExtendSignWord8:
- case kS390_ExtendSignWord16:
- case kS390_ExtendSignWord32:
+ case kS390_SignExtendWord8ToInt32:
+ case kS390_SignExtendWord16ToInt32:
+ case kS390_SignExtendWord8ToInt64:
+ case kS390_SignExtendWord16ToInt64:
+ case kS390_SignExtendWord32ToInt64:
case kS390_Uint32ToUint64:
case kS390_Int64ToInt32:
case kS390_Int64ToFloat32:
diff --git a/deps/v8/src/compiler/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
index 457c5a1d82..61a335d46e 100644
--- a/deps/v8/src/compiler/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
@@ -329,6 +329,8 @@ ArchOpcode SelectLoadOpcode(Node* node) {
V(Word32Popcnt) \
V(Float64ExtractLowWord32) \
V(Float64ExtractHighWord32) \
+ V(SignExtendWord8ToInt32) \
+ V(SignExtendWord16ToInt32) \
/* Word32 bin op */ \
V(Int32Add) \
V(Int32Sub) \
@@ -717,17 +719,24 @@ void InstructionSelector::VisitDebugAbort(Node* node) {
void InstructionSelector::VisitLoad(Node* node) {
S390OperandGenerator g(this);
- ArchOpcode opcode = SelectLoadOpcode(node);
+ InstructionCode opcode = SelectLoadOpcode(node);
InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
InstructionOperand inputs[3];
size_t input_count = 0;
AddressingMode mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
- InstructionCode code = opcode | AddressingModeField::encode(mode);
- Emit(code, 1, outputs, input_count, inputs);
+ opcode |= AddressingModeField::encode(mode);
+ if (node->opcode() == IrOpcode::kPoisonedLoad) {
+ CHECK_EQ(load_poisoning_, LoadPoisoning::kDoPoison);
+ opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ }
+
+ Emit(opcode, 1, outputs, input_count, inputs);
}
+void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
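
VisitLoad now packs two extra facts into the InstructionCode it emits: the addressing mode and, for IrOpcode::kPoisonedLoad, a kMemoryAccessPoisoned marker in MiscField, presumably read back on the code-generation side. The sketch below shows the general shift-and-or packing with made-up field positions; V8's actual AddressingModeField/MiscField widths differ:

#include <cstdint>
#include <cstdio>

// Hypothetical field layout for illustration only; V8 defines these fields
// with its BitField template and different widths.
constexpr uint32_t kOpcodeBits = 9;
constexpr uint32_t kAddressingModeShift = kOpcodeBits;
constexpr uint32_t kAddressingModeBits = 5;
constexpr uint32_t kMiscShift = kAddressingModeShift + kAddressingModeBits;

constexpr uint32_t EncodeAddressingMode(uint32_t mode) {
  return mode << kAddressingModeShift;
}
constexpr uint32_t EncodeMisc(uint32_t value) { return value << kMiscShift; }

int main() {
  uint32_t code = 42;                   // some ArchOpcode
  code |= EncodeAddressingMode(3);      // AddressingModeField::encode(mode)
  code |= EncodeMisc(1);                // MiscField::encode(kMemoryAccessPoisoned)
  std::printf("opcode=%u mode=%u misc=%u\n",
              code & ((1u << kOpcodeBits) - 1),
              (code >> kAddressingModeShift) & ((1u << kAddressingModeBits) - 1),
              code >> kMiscShift);      // prints opcode=42 mode=3 misc=1
}
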
@@ -1019,7 +1028,7 @@ static inline bool TryMatchSignExtInt16OrInt8FromWord32Sar(
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().Is(16) && m.right().Is(16)) {
bool canEliminateZeroExt = ProduceWord32Result(mleft.left().node());
- selector->Emit(kS390_ExtendSignWord16,
+ selector->Emit(kS390_SignExtendWord16ToInt32,
canEliminateZeroExt ? g.DefineSameAsFirst(node)
: g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()),
@@ -1027,7 +1036,7 @@ static inline bool TryMatchSignExtInt16OrInt8FromWord32Sar(
return true;
} else if (mleft.right().Is(24) && m.right().Is(24)) {
bool canEliminateZeroExt = ProduceWord32Result(mleft.left().node());
- selector->Emit(kS390_ExtendSignWord8,
+ selector->Emit(kS390_SignExtendWord8ToInt32,
canEliminateZeroExt ? g.DefineSameAsFirst(node)
: g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()),
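
The matcher rewrites the arithmetic-shift idiom (x << 16) >> 16 into kS390_SignExtendWord16ToInt32 and (x << 24) >> 24 into kS390_SignExtendWord8ToInt32. The equivalence it relies on, checked for one value:

#include <cstdint>
#include <cstdio>

// Sketch of the equivalence behind TryMatchSignExtInt16OrInt8FromWord32Sar.
// Relies on two's-complement shifts (guaranteed from C++20, universal in
// practice), which is also what the machine instructions implement.
int32_t SignExtend8ViaShifts(int32_t x) {
  return static_cast<int32_t>(static_cast<uint32_t>(x) << 24) >> 24;
}

int main() {
  int32_t x = 0x1234ABCD;
  std::printf("%d %d\n", SignExtend8ViaShifts(x),
              static_cast<int32_t>(static_cast<int8_t>(x)));
  // Both values print as -51: the shift pair and the single sign-extend agree.
}
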
@@ -1415,6 +1424,10 @@ static inline bool TryMatchDoubleConstructFromInsert(
null) \
V(Word32, ChangeUint32ToFloat64, kS390_Uint32ToDouble, OperandMode::kNone, \
null) \
+ V(Word32, SignExtendWord8ToInt32, kS390_SignExtendWord8ToInt32, \
+ OperandMode::kNone, null) \
+ V(Word32, SignExtendWord16ToInt32, kS390_SignExtendWord16ToInt32, \
+ OperandMode::kNone, null) \
V(Word32, BitcastInt32ToFloat32, kS390_BitcastInt32ToFloat32, \
OperandMode::kNone, null)
@@ -1427,8 +1440,14 @@ static inline bool TryMatchDoubleConstructFromInsert(
OperandMode::kNone, null)
#define WORD32_UNARY_OP_LIST(V) \
WORD32_UNARY_OP_LIST_32(V) \
- V(Word32, ChangeInt32ToInt64, kS390_ExtendSignWord32, OperandMode::kNone, \
- null) \
+ V(Word32, ChangeInt32ToInt64, kS390_SignExtendWord32ToInt64, \
+ OperandMode::kNone, null) \
+ V(Word32, SignExtendWord8ToInt64, kS390_SignExtendWord8ToInt64, \
+ OperandMode::kNone, null) \
+ V(Word32, SignExtendWord16ToInt64, kS390_SignExtendWord16ToInt64, \
+ OperandMode::kNone, null) \
+ V(Word32, SignExtendWord32ToInt64, kS390_SignExtendWord32ToInt64, \
+ OperandMode::kNone, null) \
V(Word32, ChangeUint32ToUint64, kS390_Uint32ToUint64, OperandMode::kNone, \
[&]() -> bool { \
if (ProduceWord32Result(node->InputAt(0))) { \
@@ -1650,10 +1669,6 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
}
}
-void VisitWordCompareZero(InstructionSelector* selector, Node* user,
- Node* value, InstructionCode opcode,
- FlagsContinuation* cont);
-
void VisitLoadAndTest(InstructionSelector* selector, InstructionCode opcode,
Node* node, Node* value, FlagsContinuation* cont,
bool discard_output = false);
@@ -1841,13 +1856,13 @@ void VisitLoadAndTest(InstructionSelector* selector, InstructionCode opcode,
}
}
+} // namespace
+
// Shared routine for word comparisons against zero.
-void VisitWordCompareZero(InstructionSelector* selector, Node* user,
- Node* value, InstructionCode opcode,
- FlagsContinuation* cont) {
+void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
+ FlagsContinuation* cont) {
// Try to combine with comparisons against 0 by simply inverting the branch.
- while (value->opcode() == IrOpcode::kWord32Equal &&
- selector->CanCover(user, value)) {
+ while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
Int32BinopMatcher m(value);
if (!m.right().Is(0)) break;
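
The loop above folds a branch on Word32Equal(x, 0) into a branch on x with the continuation negated, and peels nested == 0 wrappers one at a time. In scalar terms:

#include <cstdio>

// Sketch of the branch inversion: branching on Word32Equal(x, 0) is the same
// as branching on x with the continuation negated, and each extra == 0
// wrapper simply flips it again.
int main() {
  int x = 7;
  bool taken_direct = ((x == 0) == 0);    // Branch(Equal(Equal(x, 0), 0))
  bool taken_combined = (x != 0);         // branch on x, negated twice
  std::printf("%d %d\n", taken_direct, taken_combined);  // prints 1 1
}
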
@@ -1857,7 +1872,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
}
FlagsCondition fc = cont->condition();
- if (selector->CanCover(user, value)) {
+ if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kWord32Equal: {
cont->OverwriteAndNegateIfEqual(kEqual);
@@ -1866,31 +1881,31 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// Try to combine the branch with a comparison.
Node* const user = m.node();
Node* const value = m.left().node();
- if (selector->CanCover(user, value)) {
+ if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kInt32Sub:
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kWord32And:
- return VisitTestUnderMask(selector, value, cont);
+ return VisitTestUnderMask(this, value, cont);
default:
break;
}
}
}
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
}
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kInt32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kUint32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kUint32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
#if V8_TARGET_ARCH_S390X
case IrOpcode::kWord64Equal: {
cont->OverwriteAndNegateIfEqual(kEqual);
@@ -1899,50 +1914,50 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// Try to combine the branch with a comparison.
Node* const user = m.node();
Node* const value = m.left().node();
- if (selector->CanCover(user, value)) {
+ if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kInt64Sub:
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kWord64And:
- return VisitTestUnderMask(selector, value, cont);
+ return VisitTestUnderMask(this, value, cont);
default:
break;
}
}
}
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
}
case IrOpcode::kInt64LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kInt64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kUint64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kUint64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
#endif
case IrOpcode::kFloat32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat64Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
@@ -1954,46 +1969,46 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (result == nullptr || selector->IsDefined(result)) {
+ if (result == nullptr || IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitWord32BinOp(selector, node, kS390_Add32,
- AddOperandMode, cont);
+ return VisitWord32BinOp(this, node, kS390_Add32, AddOperandMode,
+ cont);
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitWord32BinOp(selector, node, kS390_Sub32,
- SubOperandMode, cont);
+ return VisitWord32BinOp(this, node, kS390_Sub32, SubOperandMode,
+ cont);
case IrOpcode::kInt32MulWithOverflow:
if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitWord32BinOp(
- selector, node, kS390_Mul32,
+ this, node, kS390_Mul32,
OperandMode::kAllowRRR | OperandMode::kAllowRM, cont);
} else {
cont->OverwriteAndNegateIfEqual(kNotEqual);
return VisitWord32BinOp(
- selector, node, kS390_Mul32WithOverflow,
+ this, node, kS390_Mul32WithOverflow,
OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
cont);
}
case IrOpcode::kInt32AbsWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitWord32UnaryOp(selector, node, kS390_Abs32,
+ return VisitWord32UnaryOp(this, node, kS390_Abs32,
OperandMode::kNone, cont);
#if V8_TARGET_ARCH_S390X
case IrOpcode::kInt64AbsWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitWord64UnaryOp(selector, node, kS390_Abs64,
+ return VisitWord64UnaryOp(this, node, kS390_Abs64,
OperandMode::kNone, cont);
case IrOpcode::kInt64AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitWord64BinOp(selector, node, kS390_Add64,
- AddOperandMode, cont);
+ return VisitWord64BinOp(this, node, kS390_Add64, AddOperandMode,
+ cont);
case IrOpcode::kInt64SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitWord64BinOp(selector, node, kS390_Sub64,
- SubOperandMode, cont);
+ return VisitWord64BinOp(this, node, kS390_Sub64, SubOperandMode,
+ cont);
#endif
default:
break;
@@ -2003,17 +2018,16 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
break;
case IrOpcode::kInt32Sub:
if (fc == kNotEqual || fc == kEqual)
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
break;
case IrOpcode::kWord32And:
- return VisitTestUnderMask(selector, value, cont);
+ return VisitTestUnderMask(this, value, cont);
case IrOpcode::kLoad: {
LoadRepresentation load_rep = LoadRepresentationOf(value->op());
switch (load_rep.representation()) {
case MachineRepresentation::kWord32:
- if (opcode == kS390_LoadAndTestWord32) {
- return VisitLoadAndTest(selector, opcode, user, value, cont);
- }
+ return VisitLoadAndTest(this, kS390_LoadAndTestWord32, user, value,
+ cont);
default:
break;
}
@@ -2024,13 +2038,13 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
break;
case IrOpcode::kWord32Or:
if (fc == kNotEqual || fc == kEqual)
- return VisitWord32BinOp(selector, value, kS390_Or32, Or32OperandMode,
+ return VisitWord32BinOp(this, value, kS390_Or32, Or32OperandMode,
cont);
break;
case IrOpcode::kWord32Xor:
if (fc == kNotEqual || fc == kEqual)
- return VisitWord32BinOp(selector, value, kS390_Xor32,
- Xor32OperandMode, cont);
+ return VisitWord32BinOp(this, value, kS390_Xor32, Xor32OperandMode,
+ cont);
break;
case IrOpcode::kWord32Sar:
case IrOpcode::kWord32Shl:
@@ -2041,22 +2055,22 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
#if V8_TARGET_ARCH_S390X
case IrOpcode::kInt64Sub:
if (fc == kNotEqual || fc == kEqual)
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
break;
case IrOpcode::kWord64And:
- return VisitTestUnderMask(selector, value, cont);
+ return VisitTestUnderMask(this, value, cont);
case IrOpcode::kInt64Add:
// can't handle overflow case.
break;
case IrOpcode::kWord64Or:
if (fc == kNotEqual || fc == kEqual)
- return VisitWord64BinOp(selector, value, kS390_Or64, Or64OperandMode,
+ return VisitWord64BinOp(this, value, kS390_Or64, Or64OperandMode,
cont);
break;
case IrOpcode::kWord64Xor:
if (fc == kNotEqual || fc == kEqual)
- return VisitWord64BinOp(selector, value, kS390_Xor64,
- Xor64OperandMode, cont);
+ return VisitWord64BinOp(this, value, kS390_Xor64, Xor64OperandMode,
+ cont);
break;
case IrOpcode::kWord64Sar:
case IrOpcode::kWord64Shl:
@@ -2071,54 +2085,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
}
// Branch could not be combined with a compare, emit LoadAndTest
- VisitLoadAndTest(selector, opcode, user, value, cont, true);
-}
-
-void VisitWord32CompareZero(InstructionSelector* selector, Node* user,
- Node* value, FlagsContinuation* cont) {
- VisitWordCompareZero(selector, user, value, kS390_LoadAndTestWord32, cont);
-}
-
-#if V8_TARGET_ARCH_S390X
-void VisitWord64CompareZero(InstructionSelector* selector, Node* user,
- Node* value, FlagsContinuation* cont) {
- VisitWordCompareZero(selector, user, value, kS390_LoadAndTestWord64, cont);
-}
-#endif
-
-} // namespace
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
- BasicBlock* fbranch) {
- FlagsContinuation cont(kNotEqual, tbranch, fbranch);
- VisitWord32CompareZero(this, branch, branch->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
- VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapUnless(Node* node,
- Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
- VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+ VisitLoadAndTest(this, kS390_LoadAndTestWord32, user, value, cont, true);
}
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
@@ -2126,22 +2093,23 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
- static const size_t kMaxTableSwitchValueRange = 2 << 16;
- size_t table_space_cost = 4 + sw.value_range;
- size_t table_time_cost = 3;
- size_t lookup_space_cost = 3 + 2 * sw.case_count;
- size_t lookup_time_cost = sw.case_count;
- if (sw.case_count > 0 &&
- table_space_cost + 3 * table_time_cost <=
- lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min() &&
- sw.value_range <= kMaxTableSwitchValueRange) {
- InstructionOperand index_operand = value_operand;
- if (sw.min_value) {
- index_operand = g.TempRegister();
- Emit(kS390_Lay | AddressingModeField::encode(kMode_MRI), index_operand,
- value_operand, g.TempImmediate(-sw.min_value));
- }
+ if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
+ size_t table_space_cost = 4 + sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 3 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value) {
+ index_operand = g.TempRegister();
+ Emit(kS390_Lay | AddressingModeField::encode(kMode_MRI), index_operand,
+ value_operand, g.TempImmediate(-sw.min_value));
+ }
#if V8_TARGET_ARCH_S390X
InstructionOperand index_operand_zero_ext = g.TempRegister();
Emit(kS390_Uint32ToUint64, index_operand_zero_ext, index_operand);
@@ -2150,6 +2118,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
}
+ }
// Generate a sequence of conditional jumps.
return EmitLookupSwitch(sw, value_operand);
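
Only the guard on enable_switch_jump_table_ is new here; the cost model itself is unchanged. Pulled out as a standalone predicate (and ignoring the additional min_value and kMaxTableSwitchValueRange guards), it prefers a jump table whenever value_range <= 5 * case_count - 10:

#include <cstddef>
#include <cstdio>

// The VisitSwitch cost heuristic above, as a standalone predicate.
bool PreferTableSwitch(size_t value_range, size_t case_count) {
  size_t table_space_cost = 4 + value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * case_count;
  size_t lookup_time_cost = case_count;
  return case_count > 0 && table_space_cost + 3 * table_time_cost <=
                               lookup_space_cost + 3 * lookup_time_cost;
}

int main() {
  // Dense switch: 10 cases spanning 12 values -> table (12 <= 5*10 - 10).
  std::printf("%d\n", PreferTableSwitch(12, 10));  // prints 1
  // Sparse switch: 4 cases spanning 100 values -> lookup (100 > 5*4 - 10).
  std::printf("%d\n", PreferTableSwitch(100, 4));  // prints 0
}
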
@@ -2159,7 +2128,8 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int32BinopMatcher m(node);
if (m.right().Is(0)) {
- return VisitWord32CompareZero(this, m.node(), m.left().node(), &cont);
+ return VisitLoadAndTest(this, kS390_LoadAndTestWord32, m.node(),
+ m.left().node(), &cont, true);
}
VisitWord32Compare(this, node, &cont);
}
@@ -2191,7 +2161,8 @@ void InstructionSelector::VisitWord64Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int64BinopMatcher m(node);
if (m.right().Is(0)) {
- return VisitWord64CompareZero(this, m.node(), m.left().node(), &cont);
+ return VisitLoadAndTest(this, kS390_LoadAndTestWord64, m.node(),
+ m.left().node(), &cont, true);
}
VisitWord64Compare(this, node, &cont);
}
@@ -2252,14 +2223,14 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
}
void InstructionSelector::EmitPrepareArguments(
- ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
Node* node) {
S390OperandGenerator g(this);
// Prepare for C function call.
- if (descriptor->IsCFunctionCall()) {
- Emit(kArchPrepareCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
+ if (call_descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
+ call_descriptor->ParameterCount())),
0, nullptr, 0, nullptr);
// Poke any stack arguments.
@@ -2301,7 +2272,7 @@ bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
-void InstructionSelector::VisitAtomicLoad(Node* node) {
+void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
S390OperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -2309,13 +2280,15 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
ArchOpcode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ opcode =
+ load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
+ : kWord32AtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicLoadWord32;
+ opcode = kWord32AtomicLoadWord32;
break;
default:
UNREACHABLE();
@@ -2325,7 +2298,7 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
}
-void InstructionSelector::VisitAtomicStore(Node* node) {
+void InstructionSelector::VisitWord32AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
S390OperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -2334,13 +2307,13 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kAtomicStoreWord8;
+ opcode = kWord32AtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
- opcode = kAtomicStoreWord16;
+ opcode = kWord32AtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicStoreWord32;
+ opcode = kWord32AtomicStoreWord32;
break;
default:
UNREACHABLE();
@@ -2356,7 +2329,7 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
inputs);
}
-void InstructionSelector::VisitAtomicExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
S390OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2364,15 +2337,15 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpRepresentationOf(node->op());
if (type == MachineType::Int8()) {
- opcode = kAtomicExchangeInt8;
+ opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicExchangeUint8;
+ opcode = kWord32AtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicExchangeInt16;
+ opcode = kWord32AtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicExchangeUint16;
+ opcode = kWord32AtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicExchangeWord32;
+ opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -2390,19 +2363,19 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
Emit(code, 1, outputs, input_count, inputs);
}
-void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitAtomicAdd(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitWord32AtomicAdd(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitAtomicSub(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitWord32AtomicSub(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitAtomicAnd(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitWord32AtomicAnd(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitAtomicOr(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitWord32AtomicOr(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitAtomicXor(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitWord32AtomicXor(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4Splat(Node* node) { UNIMPLEMENTED(); }
@@ -2572,9 +2545,9 @@ void InstructionSelector::VisitF32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
- const CallDescriptor* descriptor,
- Node* node) {
+void InstructionSelector::EmitPrepareResults(
+ ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
+ Node* node) {
// TODO(John): Port.
}
@@ -2588,6 +2561,12 @@ void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Max(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitS128Select(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Abs(Node* node) { UNIMPLEMENTED(); }
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
@@ -2613,6 +2592,9 @@ InstructionSelector::AlignmentRequirements() {
FullUnalignedAccessSupport();
}
+// static
+bool InstructionSelector::SupportsSpeculationPoisoning() { return true; }
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index 423d757a4f..980c88a6e6 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -85,6 +85,7 @@ void SimdScalarLowering::LowerGraph() {
V(I32x4Shl) \
V(I32x4ShrS) \
V(I32x4Add) \
+ V(I32x4AddHoriz) \
V(I32x4Sub) \
V(I32x4Mul) \
V(I32x4MinS) \
@@ -116,6 +117,7 @@ void SimdScalarLowering::LowerGraph() {
V(F32x4Abs) \
V(F32x4Neg) \
V(F32x4Add) \
+ V(F32x4AddHoriz) \
V(F32x4Sub) \
V(F32x4Mul) \
V(F32x4Min) \
@@ -138,6 +140,7 @@ void SimdScalarLowering::LowerGraph() {
V(I16x8ShrS) \
V(I16x8Add) \
V(I16x8AddSaturateS) \
+ V(I16x8AddHoriz) \
V(I16x8Sub) \
V(I16x8SubSaturateS) \
V(I16x8Mul) \
@@ -354,7 +357,6 @@ void SimdScalarLowering::LowerStoreOp(MachineRepresentation rep, Node* node,
int num_lanes = NumLanes(rep_type);
Node** indices = zone()->NewArray<Node*>(num_lanes);
GetIndexNodes(index, indices, rep_type);
- DCHECK_LT(2, node->InputCount());
Node* value = node->InputAt(2);
DCHECK(HasReplacement(1, value));
Node** rep_nodes = zone()->NewArray<Node*>(num_lanes);
@@ -387,14 +389,30 @@ void SimdScalarLowering::LowerStoreOp(MachineRepresentation rep, Node* node,
}
void SimdScalarLowering::LowerBinaryOp(Node* node, SimdType input_rep_type,
- const Operator* op) {
+ const Operator* op,
+ bool not_horizontal) {
DCHECK_EQ(2, node->InputCount());
Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
int num_lanes = NumLanes(input_rep_type);
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
- for (int i = 0; i < num_lanes; ++i) {
- rep_node[i] = graph()->NewNode(op, rep_left[i], rep_right[i]);
+ if (not_horizontal) {
+ for (int i = 0; i < num_lanes; ++i) {
+ rep_node[i] = graph()->NewNode(op, rep_left[i], rep_right[i]);
+ }
+ } else {
+ for (int i = 0; i < num_lanes / 2; ++i) {
+#if defined(V8_TARGET_BIG_ENDIAN)
+ rep_node[i] =
+ graph()->NewNode(op, rep_right[i * 2], rep_right[i * 2 + 1]);
+ rep_node[i + num_lanes / 2] =
+ graph()->NewNode(op, rep_left[i * 2], rep_left[i * 2 + 1]);
+#else
+ rep_node[i] = graph()->NewNode(op, rep_left[i * 2], rep_left[i * 2 + 1]);
+ rep_node[i + num_lanes / 2] =
+ graph()->NewNode(op, rep_right[i * 2], rep_right[i * 2 + 1]);
+#endif
+ }
}
ReplaceNode(node, rep_node, num_lanes);
}
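
With not_horizontal == false, LowerBinaryOp adds adjacent lane pairs within each operand instead of combining lane i of both operands: the pairs from the left input fill the low half of the result and the pairs from the right input the high half (swapped on big-endian targets). A scalar sketch of what I32x4AddHoriz lowers to on a little-endian target:

#include <array>
#include <cstdint>
#include <cstdio>

// Little-endian lane order, matching the #else branch above.
std::array<int32_t, 4> I32x4AddHoriz(const std::array<int32_t, 4>& a,
                                     const std::array<int32_t, 4>& b) {
  return {a[0] + a[1], a[2] + a[3], b[0] + b[1], b[2] + b[3]};
}

int main() {
  auto r = I32x4AddHoriz({1, 2, 3, 4}, {10, 20, 30, 40});
  std::printf("%d %d %d %d\n", r[0], r[1], r[2], r[3]);  // prints 3 7 30 70
}
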
@@ -436,7 +454,8 @@ Node* SimdScalarLowering::FixUpperBits(Node* input, int32_t shift) {
void SimdScalarLowering::LowerBinaryOpForSmallInt(Node* node,
SimdType input_rep_type,
- const Operator* op) {
+ const Operator* op,
+ bool not_horizontal) {
DCHECK_EQ(2, node->InputCount());
DCHECK(input_rep_type == SimdType::kInt16x8 ||
input_rep_type == SimdType::kInt8x16);
@@ -446,9 +465,29 @@ void SimdScalarLowering::LowerBinaryOpForSmallInt(Node* node,
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
int32_t shift_val =
(input_rep_type == SimdType::kInt16x8) ? kShift16 : kShift8;
- for (int i = 0; i < num_lanes; ++i) {
- rep_node[i] = FixUpperBits(graph()->NewNode(op, rep_left[i], rep_right[i]),
- shift_val);
+ if (not_horizontal) {
+ for (int i = 0; i < num_lanes; ++i) {
+ rep_node[i] = FixUpperBits(
+ graph()->NewNode(op, rep_left[i], rep_right[i]), shift_val);
+ }
+ } else {
+ for (int i = 0; i < num_lanes / 2; ++i) {
+#if defined(V8_TARGET_BIG_ENDIAN)
+ rep_node[i] = FixUpperBits(
+ graph()->NewNode(op, rep_right[i * 2], rep_right[i * 2 + 1]),
+ shift_val);
+ rep_node[i + num_lanes / 2] = FixUpperBits(
+ graph()->NewNode(op, rep_left[i * 2], rep_left[i * 2 + 1]),
+ shift_val);
+#else
+ rep_node[i] = FixUpperBits(
+ graph()->NewNode(op, rep_left[i * 2], rep_left[i * 2 + 1]),
+ shift_val);
+ rep_node[i + num_lanes / 2] = FixUpperBits(
+ graph()->NewNode(op, rep_right[i * 2], rep_right[i * 2 + 1]),
+ shift_val);
+#endif
+ }
}
ReplaceNode(node, rep_node, num_lanes);
}
@@ -578,9 +617,9 @@ Node* SimdScalarLowering::BuildF64Trunc(Node* input) {
args[3] = graph()->start();
Signature<MachineType>::Builder sig_builder(zone(), 0, 1);
sig_builder.AddParam(MachineType::Pointer());
- CallDescriptor* desc =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), sig_builder.Build());
- Node* call = graph()->NewNode(common()->Call(desc), 4, args);
+ Node* call = graph()->NewNode(common()->Call(call_descriptor), 4, args);
return graph()->NewNode(machine()->Load(LoadRepresentation::Float64()),
stack_slot, jsgraph_->Int32Constant(0), call,
graph()->start());
@@ -636,7 +675,8 @@ void SimdScalarLowering::LowerShiftOp(Node* node, SimdType type) {
graph()->NewNode(machine()->Word32Shr(), rep_node[i], shift_node);
break;
case IrOpcode::kI16x8ShrU:
- rep_node[i] = Mask(rep_node[i], kMask16); // Fall through.
+ rep_node[i] = Mask(rep_node[i], kMask16);
+ V8_FALLTHROUGH;
case IrOpcode::kI32x4ShrU:
rep_node[i] =
graph()->NewNode(machine()->Word32Shr(), rep_node[i], shift_node);
@@ -751,22 +791,35 @@ void SimdScalarLowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kStore: {
+      // For a store operation, use the replacement type of its value input
+      // instead of the one recorded for the store node itself.
+ DCHECK_LT(2, node->InputCount());
+ SimdType input_rep_type = ReplacementType(node->InputAt(2));
+ if (input_rep_type != rep_type)
+ replacements_[node->id()].type = input_rep_type;
MachineRepresentation rep =
StoreRepresentationOf(node->op()).representation();
WriteBarrierKind write_barrier_kind =
StoreRepresentationOf(node->op()).write_barrier_kind();
const Operator* store_op;
- store_op = machine()->Store(StoreRepresentation(
- MachineTypeFrom(rep_type).representation(), write_barrier_kind));
- LowerStoreOp(rep, node, store_op, rep_type);
+ store_op = machine()->Store(
+ StoreRepresentation(MachineTypeFrom(input_rep_type).representation(),
+ write_barrier_kind));
+ LowerStoreOp(rep, node, store_op, input_rep_type);
break;
}
case IrOpcode::kUnalignedStore: {
+      // For a store operation, use the replacement type of its value input
+      // instead of the one recorded for the store node itself.
+ DCHECK_LT(2, node->InputCount());
+ SimdType input_rep_type = ReplacementType(node->InputAt(2));
+ if (input_rep_type != rep_type)
+ replacements_[node->id()].type = input_rep_type;
MachineRepresentation rep = UnalignedStoreRepresentationOf(node->op());
const Operator* store_op;
- store_op =
- machine()->UnalignedStore(MachineTypeFrom(rep_type).representation());
- LowerStoreOp(rep, node, store_op, rep_type);
+ store_op = machine()->UnalignedStore(
+ MachineTypeFrom(input_rep_type).representation());
+ LowerStoreOp(rep, node, store_op, input_rep_type);
break;
}
case IrOpcode::kReturn: {
@@ -779,18 +832,18 @@ void SimdScalarLowering::LowerNode(Node* node) {
}
case IrOpcode::kCall: {
// TODO(turbofan): Make wasm code const-correct wrt. CallDescriptor.
- CallDescriptor* descriptor =
+ auto call_descriptor =
const_cast<CallDescriptor*>(CallDescriptorOf(node->op()));
if (DefaultLowering(node) ||
- (descriptor->ReturnCount() == 1 &&
- descriptor->GetReturnType(0) == MachineType::Simd128())) {
+ (call_descriptor->ReturnCount() == 1 &&
+ call_descriptor->GetReturnType(0) == MachineType::Simd128())) {
// We have to adjust the call descriptor.
- const Operator* op =
- common()->Call(GetI32WasmCallDescriptorForSimd(zone(), descriptor));
+ const Operator* op = common()->Call(
+ GetI32WasmCallDescriptorForSimd(zone(), call_descriptor));
NodeProperties::ChangeOp(node, op);
}
- if (descriptor->ReturnCount() == 1 &&
- descriptor->GetReturnType(0) == MachineType::Simd128()) {
+ if (call_descriptor->ReturnCount() == 1 &&
+ call_descriptor->GetReturnType(0) == MachineType::Simd128()) {
// We access the additional return values through projections.
Node* rep_node[kNumLanes32];
for (int i = 0; i < kNumLanes32; ++i) {
@@ -831,6 +884,14 @@ void SimdScalarLowering::LowerNode(Node* node) {
I32X4_BINOP_CASE(kS128Or, Word32Or)
I32X4_BINOP_CASE(kS128Xor, Word32Xor)
#undef I32X4_BINOP_CASE
+ case IrOpcode::kI32x4AddHoriz: {
+ LowerBinaryOp(node, rep_type, machine()->Int32Add(), false);
+ break;
+ }
+ case IrOpcode::kI16x8AddHoriz: {
+ LowerBinaryOpForSmallInt(node, rep_type, machine()->Int32Add(), false);
+ break;
+ }
case IrOpcode::kI16x8Add:
case IrOpcode::kI8x16Add: {
LowerBinaryOpForSmallInt(node, rep_type, machine()->Int32Add());
@@ -940,6 +1001,10 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerShiftOp(node, rep_type);
break;
}
+ case IrOpcode::kF32x4AddHoriz: {
+ LowerBinaryOp(node, rep_type, machine()->Float32Add(), false);
+ break;
+ }
#define F32X4_BINOP_CASE(name) \
case IrOpcode::kF32x4##name: { \
LowerBinaryOp(node, rep_type, machine()->Float32##name()); \
@@ -1002,7 +1067,11 @@ void SimdScalarLowering::LowerNode(Node* node) {
DCHECK_EQ(2, node->InputCount());
Node* repNode = node->InputAt(1);
int32_t lane = OpParameter<int32_t>(node);
- Node** rep_node = GetReplacementsWithType(node->InputAt(0), rep_type);
+ Node** old_rep_node = GetReplacementsWithType(node->InputAt(0), rep_type);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ for (int i = 0; i < num_lanes; ++i) {
+ rep_node[i] = old_rep_node[i];
+ }
if (HasReplacement(0, repNode)) {
rep_node[lane] = GetReplacements(repNode)[0];
} else {
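
The ReplaceLane lowering now copies the input's replacement lanes into a fresh array before overwriting the selected lane, presumably so the replacements recorded for the original vector stay untouched for its other uses. The aliasing problem being avoided, in miniature:

#include <array>
#include <cstdio>

// Sketch of why ReplaceLane copies first: overwriting the shared array would
// also change what every other user of the original vector's lanes sees.
int main() {
  std::array<int, 4> original = {1, 2, 3, 4};  // replacements of InputAt(0)
  std::array<int, 4> replaced = original;      // fresh array for the new node
  replaced[2] = 99;                            // only the new node changes
  std::printf("%d %d\n", original[2], replaced[2]);  // prints 3 99
}
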
@@ -1075,11 +1144,12 @@ void SimdScalarLowering::LowerNode(Node* node) {
Node** rep_right = GetReplacementsWithType(node->InputAt(2), rep_type);
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
for (int i = 0; i < num_lanes; ++i) {
- Diamond d(graph(), common(),
- graph()->NewNode(machine()->Word32Equal(), boolean_input[i],
- jsgraph_->Int32Constant(0)));
- rep_node[i] = d.Phi(MachineTypeFrom(rep_type).representation(),
- rep_right[1], rep_left[0]);
+ Node* tmp1 =
+ graph()->NewNode(machine()->Word32Xor(), rep_left[i], rep_right[i]);
+ Node* tmp2 =
+ graph()->NewNode(machine()->Word32And(), boolean_input[i], tmp1);
+ rep_node[i] =
+ graph()->NewNode(machine()->Word32Xor(), rep_right[i], tmp2);
}
ReplaceNode(node, rep_node, num_lanes);
break;
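
The per-lane S128Select no longer builds a branch diamond; it uses the branch-free identity b ^ (mask & (a ^ b)), which selects bits of a where the mask is set and bits of b where it is clear (and drops the old code's hard-coded rep_right[1]/rep_left[0] lane indices). A quick check of the identity:

#include <cstdint>
#include <cstdio>

// Branch-free bitwise select: picks bits of a where mask is 1, of b where 0.
uint32_t Select(uint32_t mask, uint32_t a, uint32_t b) {
  return b ^ (mask & (a ^ b));
}

int main() {
  std::printf("%08x\n", Select(0xFFFF0000u, 0xAAAAAAAAu, 0x55555555u));
  // prints aaaa5555: the high half comes from a, the low half from b.
}
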
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.h b/deps/v8/src/compiler/simd-scalar-lowering.h
index f7f276cd5e..ad329877e2 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.h
+++ b/deps/v8/src/compiler/simd-scalar-lowering.h
@@ -73,12 +73,13 @@ class SimdScalarLowering {
const Operator* load_op, SimdType type);
void LowerStoreOp(MachineRepresentation rep, Node* node,
const Operator* store_op, SimdType rep_type);
- void LowerBinaryOp(Node* node, SimdType input_rep_type, const Operator* op);
+ void LowerBinaryOp(Node* node, SimdType input_rep_type, const Operator* op,
+ bool not_horizontal = true);
void LowerCompareOp(Node* node, SimdType input_rep_type, const Operator* op,
bool invert_inputs = false);
Node* FixUpperBits(Node* input, int32_t shift);
void LowerBinaryOpForSmallInt(Node* node, SimdType input_rep_type,
- const Operator* op);
+ const Operator* op, bool not_horizontal = true);
Node* Mask(Node* input, int32_t mask);
void LowerSaturateBinaryOp(Node* node, SimdType input_rep_type,
const Operator* op, bool is_signed);
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 6e6c011fc1..bde73f4e59 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -82,29 +82,32 @@ MachineRepresentation MachineRepresentationFromArrayType(
return MachineRepresentation::kFloat32;
case kExternalFloat64Array:
return MachineRepresentation::kFloat64;
+ case kExternalBigInt64Array:
+ case kExternalBigUint64Array:
+ UNIMPLEMENTED();
}
UNREACHABLE();
}
UseInfo CheckedUseInfoAsWord32FromHint(
- NumberOperationHint hint,
+ NumberOperationHint hint, const VectorSlotPair& feedback = VectorSlotPair(),
IdentifyZeros identify_zeros = kDistinguishZeros) {
switch (hint) {
case NumberOperationHint::kSignedSmall:
case NumberOperationHint::kSignedSmallInputs:
- return UseInfo::CheckedSignedSmallAsWord32(identify_zeros,
- VectorSlotPair());
+ return UseInfo::CheckedSignedSmallAsWord32(identify_zeros, feedback);
case NumberOperationHint::kSigned32:
- return UseInfo::CheckedSigned32AsWord32(identify_zeros);
+ return UseInfo::CheckedSigned32AsWord32(identify_zeros, feedback);
case NumberOperationHint::kNumber:
- return UseInfo::CheckedNumberAsWord32();
+ return UseInfo::CheckedNumberAsWord32(feedback);
case NumberOperationHint::kNumberOrOddball:
- return UseInfo::CheckedNumberOrOddballAsWord32();
+ return UseInfo::CheckedNumberOrOddballAsWord32(feedback);
}
UNREACHABLE();
}
-UseInfo CheckedUseInfoAsFloat64FromHint(NumberOperationHint hint) {
+UseInfo CheckedUseInfoAsFloat64FromHint(NumberOperationHint hint,
+ const VectorSlotPair& feedback) {
switch (hint) {
case NumberOperationHint::kSignedSmall:
case NumberOperationHint::kSignedSmallInputs:
@@ -113,9 +116,9 @@ UseInfo CheckedUseInfoAsFloat64FromHint(NumberOperationHint hint) {
UNREACHABLE();
break;
case NumberOperationHint::kNumber:
- return UseInfo::CheckedNumberAsFloat64();
+ return UseInfo::CheckedNumberAsFloat64(feedback);
case NumberOperationHint::kNumberOrOddball:
- return UseInfo::CheckedNumberOrOddballAsFloat64();
+ return UseInfo::CheckedNumberOrOddballAsFloat64(feedback);
}
UNREACHABLE();
}
@@ -1022,8 +1025,8 @@ class RepresentationSelector {
}
void VisitCall(Node* node, SimplifiedLowering* lowering) {
- const CallDescriptor* desc = CallDescriptorOf(node->op());
- int params = static_cast<int>(desc->ParameterCount());
+ auto call_descriptor = CallDescriptorOf(node->op());
+ int params = static_cast<int>(call_descriptor->ParameterCount());
int value_input_count = node->op()->ValueInputCount();
// Propagate representation information from call descriptor.
for (int i = 0; i < value_input_count; i++) {
@@ -1033,15 +1036,15 @@ class RepresentationSelector {
} else if ((i - 1) < params) {
ProcessInput(node, i,
TruncatingUseInfoFromRepresentation(
- desc->GetInputType(i).representation()));
+ call_descriptor->GetInputType(i).representation()));
} else {
ProcessInput(node, i, UseInfo::AnyTagged());
}
}
ProcessRemainingInputs(node, value_input_count);
- if (desc->ReturnCount() > 0) {
- SetOutput(node, desc->GetReturnType(0).representation());
+ if (call_descriptor->ReturnCount() > 0) {
+ SetOutput(node, call_descriptor->GetReturnType(0).representation());
} else {
SetOutput(node, MachineRepresentation::kTagged);
}
@@ -1234,12 +1237,16 @@ class RepresentationSelector {
MachineRepresentation field_representation, int field_offset,
Type* field_type, MachineRepresentation value_representation,
Node* value) {
- if (base_taggedness == kTaggedBase &&
- field_offset == HeapObject::kMapOffset) {
- return kMapWriteBarrier;
+ WriteBarrierKind write_barrier_kind =
+ WriteBarrierKindFor(base_taggedness, field_representation, field_type,
+ value_representation, value);
+ if (write_barrier_kind != kNoWriteBarrier) {
+ if (base_taggedness == kTaggedBase &&
+ field_offset == HeapObject::kMapOffset) {
+ write_barrier_kind = kMapWriteBarrier;
+ }
}
- return WriteBarrierKindFor(base_taggedness, field_representation,
- field_type, value_representation, value);
+ return write_barrier_kind;
}
Graph* graph() const { return jsgraph_->graph(); }
@@ -1321,13 +1328,14 @@ class RepresentationSelector {
!right_feedback_type->Maybe(Type::MinusZero())) {
left_identify_zeros = kIdentifyZeros;
}
- UseInfo left_use =
- CheckedUseInfoAsWord32FromHint(hint, left_identify_zeros);
+ UseInfo left_use = CheckedUseInfoAsWord32FromHint(hint, VectorSlotPair(),
+ left_identify_zeros);
// For CheckedInt32Add and CheckedInt32Sub, we don't need to do
// a minus zero check for the right hand side, since we already
// know that the left hand side is a proper Signed32 value,
// potentially guarded by a check.
- UseInfo right_use = CheckedUseInfoAsWord32FromHint(hint, kIdentifyZeros);
+ UseInfo right_use = CheckedUseInfoAsWord32FromHint(hint, VectorSlotPair(),
+ kIdentifyZeros);
VisitBinop(node, left_use, right_use, MachineRepresentation::kWord32,
Type::Signed32());
}
@@ -1357,7 +1365,7 @@ class RepresentationSelector {
}
// default case => Float64Add/Sub
- VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(),
+ VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(VectorSlotPair()),
MachineRepresentation::kFloat64, Type::Number());
if (lower()) {
ChangeToPureOp(node, Float64Op(node));
@@ -1456,7 +1464,7 @@ class RepresentationSelector {
return;
}
// default case => Float64Mod
- VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(),
+ VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(VectorSlotPair()),
MachineRepresentation::kFloat64, Type::Number());
if (lower()) ChangeToPureOp(node, Float64Op(node));
return;
@@ -1717,9 +1725,10 @@ class RepresentationSelector {
         // on Oddballs, so make sure we don't accidentally sneak in a
// hint with Oddball feedback here.
DCHECK_NE(IrOpcode::kSpeculativeNumberEqual, node->opcode());
- // Fallthrough
+ V8_FALLTHROUGH;
case NumberOperationHint::kNumber:
- VisitBinop(node, CheckedUseInfoAsFloat64FromHint(hint),
+ VisitBinop(node,
+ CheckedUseInfoAsFloat64FromHint(hint, VectorSlotPair()),
MachineRepresentation::kBit);
if (lower()) ChangeToPureOp(node, Float64Op(node));
return;
@@ -1792,7 +1801,8 @@ class RepresentationSelector {
}
// Checked float64 x float64 => float64
- VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(),
+ VisitBinop(node,
+ UseInfo::CheckedNumberOrOddballAsFloat64(VectorSlotPair()),
MachineRepresentation::kFloat64, Type::Number());
if (lower()) ChangeToPureOp(node, Float64Op(node));
return;
@@ -1886,7 +1896,8 @@ class RepresentationSelector {
}
// default case => Float64Div
- VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(),
+ VisitBinop(node,
+ UseInfo::CheckedNumberOrOddballAsFloat64(VectorSlotPair()),
MachineRepresentation::kFloat64, Type::Number());
if (lower()) ChangeToPureOp(node, Float64Op(node));
return;
@@ -2320,7 +2331,6 @@ class RepresentationSelector {
MachineRepresentation::kTaggedPointer);
return;
}
- case IrOpcode::kClassOf:
case IrOpcode::kTypeOf: {
return VisitUnop(node, UseInfo::AnyTagged(),
MachineRepresentation::kTaggedPointer);
@@ -2352,16 +2362,25 @@ class RepresentationSelector {
NodeProperties::ChangeOp(node, simplified()->SeqStringCharCodeAt());
}
} else {
- // TODO(turbofan): Allow builtins to return untagged values.
VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
- MachineRepresentation::kTaggedSigned);
+ MachineRepresentation::kWord32);
}
return;
}
case IrOpcode::kStringCodePointAt: {
- // TODO(turbofan): Allow builtins to return untagged values.
- VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
- MachineRepresentation::kTaggedSigned);
+ Type* string_type = TypeOf(node->InputAt(0));
+ if (string_type->Is(Type::SeqString())) {
+ VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower()) {
+ UnicodeEncoding encoding = UnicodeEncodingOf(node->op());
+ NodeProperties::ChangeOp(
+ node, simplified()->SeqStringCodePointAt(encoding));
+ }
+ } else {
+ VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
+ MachineRepresentation::kTaggedSigned);
+ }
return;
}
case IrOpcode::kStringFromCharCode: {
@@ -2389,6 +2408,14 @@ class RepresentationSelector {
MachineRepresentation::kTaggedSigned);
return;
}
+ case IrOpcode::kStringSubstring: {
+ ProcessInput(node, 0, UseInfo::AnyTagged());
+ ProcessInput(node, 1, UseInfo::TruncatingWord32());
+ ProcessInput(node, 2, UseInfo::TruncatingWord32());
+ ProcessRemainingInputs(node, 3);
+ SetOutput(node, MachineRepresentation::kTaggedPointer);
+ return;
+ }
case IrOpcode::kStringToLowerCaseIntl:
case IrOpcode::kStringToUpperCaseIntl: {
VisitUnop(node, UseInfo::AnyTagged(),
@@ -2396,6 +2423,7 @@ class RepresentationSelector {
return;
}
case IrOpcode::kCheckBounds: {
+ const CheckParameters& p = CheckParametersOf(node->op());
Type* index_type = TypeOf(node->InputAt(0));
Type* length_type = TypeOf(node->InputAt(1));
if (index_type->Is(Type::Integral32OrMinusZero())) {
@@ -2414,9 +2442,10 @@ class RepresentationSelector {
}
}
} else {
- VisitBinop(node, UseInfo::CheckedSigned32AsWord32(kIdentifyZeros),
- UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
+ VisitBinop(
+ node,
+ UseInfo::CheckedSigned32AsWord32(kIdentifyZeros, p.feedback()),
+ UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
}
return;
}
@@ -2698,17 +2727,20 @@ class RepresentationSelector {
return;
}
case IrOpcode::kSpeculativeToNumber: {
- NumberOperationHint const hint = NumberOperationHintOf(node->op());
- switch (hint) {
+ NumberOperationParameters const& p =
+ NumberOperationParametersOf(node->op());
+ switch (p.hint()) {
case NumberOperationHint::kSigned32:
case NumberOperationHint::kSignedSmall:
case NumberOperationHint::kSignedSmallInputs:
- VisitUnop(node, CheckedUseInfoAsWord32FromHint(hint),
+ VisitUnop(node,
+ CheckedUseInfoAsWord32FromHint(p.hint(), p.feedback()),
MachineRepresentation::kWord32, Type::Signed32());
break;
case NumberOperationHint::kNumber:
case NumberOperationHint::kNumberOrOddball:
- VisitUnop(node, CheckedUseInfoAsFloat64FromHint(hint),
+ VisitUnop(node,
+ CheckedUseInfoAsFloat64FromHint(p.hint(), p.feedback()),
MachineRepresentation::kFloat64);
break;
}
@@ -3040,10 +3072,10 @@ class RepresentationSelector {
// Assume the output is tagged.
return SetOutput(node, MachineRepresentation::kTagged);
case IrOpcode::kDeadValue:
+ ProcessInput(node, 0, UseInfo::Any());
return SetOutput(node, MachineRepresentation::kNone);
default:
- V8_Fatal(
- __FILE__, __LINE__,
+ FATAL(
"Representation inference: unsupported opcode %i (%s), node #%i\n.",
node->opcode(), node->op()->mnemonic(), node->id());
break;
@@ -3621,14 +3653,14 @@ Node* SimplifiedLowering::Uint32Mod(Node* const node) {
// General case for unsigned integer modulus, with optimization for (unknown)
// power of 2 right hand side.
//
- // if rhs then
+ // if rhs == 0 then
+ // zero
+ // else
// msk = rhs - 1
// if rhs & msk != 0 then
// lhs % rhs
// else
// lhs & msk
- // else
- // zero
//
// Note: We do not use the Diamond helper class here, because it really hurts
// readability with nested diamonds.
@@ -3636,16 +3668,20 @@ Node* SimplifiedLowering::Uint32Mod(Node* const node) {
const Operator* const phi_op =
common()->Phi(MachineRepresentation::kWord32, 2);
- Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), rhs,
+ Node* check0 = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
+ Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kFalse), check0,
graph()->start());
Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* true0;
+ Node* true0 = zero;
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* false0;
{
Node* msk = graph()->NewNode(machine()->Int32Add(), rhs, minus_one);
Node* check1 = graph()->NewNode(machine()->Word32And(), rhs, msk);
- Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+ Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
Node* true1 = graph()->NewNode(machine()->Uint32Mod(), lhs, rhs, if_true1);
@@ -3653,13 +3689,10 @@ Node* SimplifiedLowering::Uint32Mod(Node* const node) {
Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
Node* false1 = graph()->NewNode(machine()->Word32And(), lhs, msk);
- if_true0 = graph()->NewNode(merge_op, if_true1, if_false1);
- true0 = graph()->NewNode(phi_op, true1, false1, if_true0);
+ if_false0 = graph()->NewNode(merge_op, if_true1, if_false1);
+ false0 = graph()->NewNode(phi_op, true1, false1, if_false0);
}
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* false0 = zero;
-
Node* merge0 = graph()->NewNode(merge_op, if_true0, if_false0);
return graph()->NewNode(phi_op, true0, false0, merge0);
}
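
The rewritten Uint32Mod graph tests rhs == 0 first (hinted as unlikely), then chooses between a real modulus and a mask depending on whether rhs is a power of two, exactly as the updated comment describes. A scalar rendering of the lowered control flow:

#include <cstdint>
#include <cstdio>

// Scalar rendering of the lowered graph: guard against rhs == 0, then use a
// masking AND when rhs is a power of two, a real modulus otherwise.
uint32_t Uint32Mod(uint32_t lhs, uint32_t rhs) {
  if (rhs == 0) return 0;                  // rhs == 0 yields zero, as above
  uint32_t msk = rhs - 1;
  if ((rhs & msk) != 0) return lhs % rhs;  // rhs is not a power of two
  return lhs & msk;                        // power of two: keep the low bits
}

int main() {
  std::printf("%u %u %u\n", Uint32Mod(29, 8), Uint32Mod(29, 7),
              Uint32Mod(29, 0));  // prints 5 1 0
}
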
@@ -3809,10 +3842,10 @@ Operator const* SimplifiedLowering::ToNumberOperator() {
if (!to_number_operator_.is_set()) {
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToNumber);
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags,
Operator::kNoProperties);
- to_number_operator_.set(common()->Call(desc));
+ to_number_operator_.set(common()->Call(call_descriptor));
}
return to_number_operator_.get();
}
@@ -3821,10 +3854,10 @@ Operator const* SimplifiedLowering::ToNumericOperator() {
if (!to_numeric_operator_.is_set()) {
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToNumeric);
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags,
Operator::kNoProperties);
- to_numeric_operator_.set(common()->Call(desc));
+ to_numeric_operator_.set(common()->Call(call_descriptor));
}
return to_numeric_operator_.get();
}
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 9978bae122..f4802a96d0 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -258,11 +258,6 @@ std::ostream& operator<<(std::ostream& os, CheckTaggedInputMode mode) {
UNREACHABLE();
}
-CheckTaggedInputMode CheckTaggedInputModeOf(const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kCheckedTaggedToFloat64);
- return OpParameter<CheckTaggedInputMode>(op);
-}
-
std::ostream& operator<<(std::ostream& os, GrowFastElementsMode mode) {
switch (mode) {
case GrowFastElementsMode::kDoubleElements:
@@ -487,8 +482,7 @@ size_t hash_value(NumberOperationHint hint) {
}
NumberOperationHint NumberOperationHintOf(const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kSpeculativeToNumber ||
- op->opcode() == IrOpcode::kSpeculativeNumberAdd ||
+ DCHECK(op->opcode() == IrOpcode::kSpeculativeNumberAdd ||
op->opcode() == IrOpcode::kSpeculativeNumberSubtract ||
op->opcode() == IrOpcode::kSpeculativeNumberMultiply ||
op->opcode() == IrOpcode::kSpeculativeNumberDivide ||
@@ -507,6 +501,25 @@ NumberOperationHint NumberOperationHintOf(const Operator* op) {
return OpParameter<NumberOperationHint>(op);
}
+bool operator==(NumberOperationParameters const& lhs,
+ NumberOperationParameters const& rhs) {
+ return lhs.hint() == rhs.hint() && lhs.feedback() == rhs.feedback();
+}
+
+size_t hash_value(NumberOperationParameters const& p) {
+ return base::hash_combine(p.hint(), p.feedback());
+}
+
+std::ostream& operator<<(std::ostream& os, NumberOperationParameters const& p) {
+ return os << p.hint() << " " << p.feedback();
+}
+
+NumberOperationParameters const& NumberOperationParametersOf(
+ Operator const* op) {
+ DCHECK_EQ(IrOpcode::kSpeculativeToNumber, op->opcode());
+ return OpParameter<NumberOperationParameters>(op);
+}
+
size_t hash_value(AllocateParameters info) {
return base::hash_combine(info.type(), info.pretenure());
}
@@ -537,7 +550,9 @@ Type* AllocateTypeOf(const Operator* op) {
}
UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
- DCHECK_EQ(IrOpcode::kStringFromCodePoint, op->opcode());
+ DCHECK(op->opcode() == IrOpcode::kStringFromCodePoint ||
+ op->opcode() == IrOpcode::kStringCodePointAt ||
+ op->opcode() == IrOpcode::kSeqStringCodePointAt);
return OpParameter<UnicodeEncoding>(op);
}
@@ -553,7 +568,8 @@ DeoptimizeReason DeoptimizeReasonOf(const Operator* op) {
const CheckTaggedInputParameters& CheckTaggedInputParametersOf(
const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kCheckedTruncateTaggedToWord32);
+ DCHECK(op->opcode() == IrOpcode::kCheckedTruncateTaggedToWord32 ||
+ op->opcode() == IrOpcode::kCheckedTaggedToFloat64);
return OpParameter<CheckTaggedInputParameters>(op);
}
@@ -655,18 +671,12 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0) \
V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \
V(StringToNumber, Operator::kNoProperties, 1, 0) \
- V(StringCharAt, Operator::kNoProperties, 2, 1) \
- V(StringCharCodeAt, Operator::kNoProperties, 2, 1) \
- V(SeqStringCharCodeAt, Operator::kNoProperties, 2, 1) \
- V(StringCodePointAt, Operator::kNoProperties, 2, 1) \
- V(SeqStringCodePointAt, Operator::kNoProperties, 2, 1) \
V(StringFromCharCode, Operator::kNoProperties, 1, 0) \
V(StringIndexOf, Operator::kNoProperties, 3, 0) \
V(StringLength, Operator::kNoProperties, 1, 0) \
V(StringToLowerCaseIntl, Operator::kNoProperties, 1, 0) \
V(StringToUpperCaseIntl, Operator::kNoProperties, 1, 0) \
V(TypeOf, Operator::kNoProperties, 1, 1) \
- V(ClassOf, Operator::kNoProperties, 1, 1) \
V(PlainPrimitiveToNumber, Operator::kNoProperties, 1, 0) \
V(PlainPrimitiveToWord32, Operator::kNoProperties, 1, 0) \
V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1, 0) \
@@ -710,6 +720,12 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(NewConsString, Operator::kNoProperties, 3, 0) \
V(MaskIndexWithBound, Operator::kNoProperties, 2, 0)
+#define EFFECT_DEPENDENT_OP_LIST(V) \
+ V(StringCharAt, Operator::kNoProperties, 2, 1) \
+ V(StringCharCodeAt, Operator::kNoProperties, 2, 1) \
+ V(SeqStringCharCodeAt, Operator::kNoProperties, 2, 1) \
+ V(StringSubstring, Operator::kNoProperties, 3, 1)
+
#define SPECULATIVE_NUMBER_BINOP_LIST(V) \
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(V) \
V(SpeculativeNumberEqual) \
@@ -755,6 +771,20 @@ struct SimplifiedOperatorGlobalCache final {
PURE_OP_LIST(PURE)
#undef PURE
+#define EFFECT_DEPENDENT(Name, properties, value_input_count, \
+ control_input_count) \
+ struct Name##Operator final : public Operator { \
+ Name##Operator() \
+ : Operator(IrOpcode::k##Name, \
+ Operator::kNoDeopt | Operator::kNoWrite | \
+ Operator::kNoThrow | properties, \
+ #Name, value_input_count, 1, control_input_count, 1, 1, \
+ 0) {} \
+ }; \
+ Name##Operator k##Name;
+ EFFECT_DEPENDENT_OP_LIST(EFFECT_DEPENDENT)
+#undef EFFECT_DEPENDENT
+
#define CHECKED(Name, value_input_count, value_output_count) \
struct Name##Operator final : public Operator { \
Name##Operator() \
@@ -791,6 +821,33 @@ struct SimplifiedOperatorGlobalCache final {
#undef CHECK_IF
template <UnicodeEncoding kEncoding>
+ struct StringCodePointAtOperator final : public Operator1<UnicodeEncoding> {
+ StringCodePointAtOperator()
+ : Operator1<UnicodeEncoding>(IrOpcode::kStringCodePointAt,
+ Operator::kFoldable | Operator::kNoThrow,
+ "StringCodePointAt", 2, 1, 1, 1, 1, 0,
+ kEncoding) {}
+ };
+ StringCodePointAtOperator<UnicodeEncoding::UTF16>
+ kStringCodePointAtOperatorUTF16;
+ StringCodePointAtOperator<UnicodeEncoding::UTF32>
+ kStringCodePointAtOperatorUTF32;
+
+ template <UnicodeEncoding kEncoding>
+ struct SeqStringCodePointAtOperator final
+ : public Operator1<UnicodeEncoding> {
+ SeqStringCodePointAtOperator()
+ : Operator1<UnicodeEncoding>(IrOpcode::kSeqStringCodePointAt,
+ Operator::kFoldable | Operator::kNoThrow,
+ "SeqStringCodePointAt", 2, 1, 1, 1, 1, 0,
+ kEncoding) {}
+ };
+ SeqStringCodePointAtOperator<UnicodeEncoding::UTF16>
+ kSeqStringCodePointAtOperatorUTF16;
+ SeqStringCodePointAtOperator<UnicodeEncoding::UTF32>
+ kSeqStringCodePointAtOperatorUTF32;
+
+ template <UnicodeEncoding kEncoding>
struct StringFromCodePointOperator final : public Operator1<UnicodeEncoding> {
StringFromCodePointOperator()
: Operator1<UnicodeEncoding>(IrOpcode::kStringFromCodePoint,
@@ -891,12 +948,13 @@ struct SimplifiedOperatorGlobalCache final {
template <CheckTaggedInputMode kMode>
struct CheckedTaggedToFloat64Operator final
- : public Operator1<CheckTaggedInputMode> {
+ : public Operator1<CheckTaggedInputParameters> {
CheckedTaggedToFloat64Operator()
- : Operator1<CheckTaggedInputMode>(
+ : Operator1<CheckTaggedInputParameters>(
IrOpcode::kCheckedTaggedToFloat64,
Operator::kFoldable | Operator::kNoThrow,
- "CheckedTaggedToFloat64", 1, 1, 1, 1, 1, 0, kMode) {}
+ "CheckedTaggedToFloat64", 1, 1, 1, 1, 1, 0,
+ CheckTaggedInputParameters(kMode, VectorSlotPair())) {}
};
CheckedTaggedToFloat64Operator<CheckTaggedInputMode::kNumber>
kCheckedTaggedToFloat64NumberOperator;
@@ -1004,14 +1062,13 @@ struct SimplifiedOperatorGlobalCache final {
template <NumberOperationHint kHint>
struct SpeculativeToNumberOperator final
- : public Operator1<NumberOperationHint> {
+ : public Operator1<NumberOperationParameters> {
SpeculativeToNumberOperator()
- : Operator1<NumberOperationHint>(
- IrOpcode::kSpeculativeToNumber, // opcode
- Operator::kFoldable | Operator::kNoThrow, // flags
- "SpeculativeToNumber", // name
- 1, 1, 1, 1, 1, 0, // counts
- kHint) {} // parameter
+ : Operator1<NumberOperationParameters>(
+ IrOpcode::kSpeculativeToNumber,
+ Operator::kFoldable | Operator::kNoThrow, "SpeculativeToNumber",
+ 1, 1, 1, 1, 1, 0,
+ NumberOperationParameters(kHint, VectorSlotPair())) {}
};
SpeculativeToNumberOperator<NumberOperationHint::kSignedSmall>
kSpeculativeToNumberSignedSmallOperator;
@@ -1032,6 +1089,7 @@ SimplifiedOperatorBuilder::SimplifiedOperatorBuilder(Zone* zone)
#define GET_FROM_CACHE(Name, ...) \
const Operator* SimplifiedOperatorBuilder::Name() { return &cache_.k##Name; }
PURE_OP_LIST(GET_FROM_CACHE)
+EFFECT_DEPENDENT_OP_LIST(GET_FROM_CACHE)
CHECKED_OP_LIST(GET_FROM_CACHE)
GET_FROM_CACHE(ArrayBufferWasNeutered)
GET_FROM_CACHE(ArgumentsFrame)
@@ -1140,14 +1198,19 @@ const Operator* SimplifiedOperatorBuilder::CheckedTaggedToInt32(
}
const Operator* SimplifiedOperatorBuilder::CheckedTaggedToFloat64(
- CheckTaggedInputMode mode) {
- switch (mode) {
- case CheckTaggedInputMode::kNumber:
- return &cache_.kCheckedTaggedToFloat64NumberOperator;
- case CheckTaggedInputMode::kNumberOrOddball:
- return &cache_.kCheckedTaggedToFloat64NumberOrOddballOperator;
+ CheckTaggedInputMode mode, const VectorSlotPair& feedback) {
+ if (!feedback.IsValid()) {
+ switch (mode) {
+ case CheckTaggedInputMode::kNumber:
+ return &cache_.kCheckedTaggedToFloat64NumberOperator;
+ case CheckTaggedInputMode::kNumberOrOddball:
+ return &cache_.kCheckedTaggedToFloat64NumberOrOddballOperator;
+ }
}
- UNREACHABLE();
+ return new (zone()) Operator1<CheckTaggedInputParameters>(
+ IrOpcode::kCheckedTaggedToFloat64,
+ Operator::kFoldable | Operator::kNoThrow, "CheckedTaggedToFloat64", 1, 1,
+ 1, 1, 1, 0, CheckTaggedInputParameters(mode, feedback));
}
const Operator* SimplifiedOperatorBuilder::CheckedTruncateTaggedToWord32(
@@ -1222,20 +1285,25 @@ const Operator* SimplifiedOperatorBuilder::CheckFloat64Hole(
}
const Operator* SimplifiedOperatorBuilder::SpeculativeToNumber(
- NumberOperationHint hint) {
- switch (hint) {
- case NumberOperationHint::kSignedSmall:
- return &cache_.kSpeculativeToNumberSignedSmallOperator;
- case NumberOperationHint::kSignedSmallInputs:
- break;
- case NumberOperationHint::kSigned32:
- return &cache_.kSpeculativeToNumberSigned32Operator;
- case NumberOperationHint::kNumber:
- return &cache_.kSpeculativeToNumberNumberOperator;
- case NumberOperationHint::kNumberOrOddball:
- return &cache_.kSpeculativeToNumberNumberOrOddballOperator;
+ NumberOperationHint hint, const VectorSlotPair& feedback) {
+ if (!feedback.IsValid()) {
+ switch (hint) {
+ case NumberOperationHint::kSignedSmall:
+ return &cache_.kSpeculativeToNumberSignedSmallOperator;
+ case NumberOperationHint::kSignedSmallInputs:
+ break;
+ case NumberOperationHint::kSigned32:
+ return &cache_.kSpeculativeToNumberSigned32Operator;
+ case NumberOperationHint::kNumber:
+ return &cache_.kSpeculativeToNumberNumberOperator;
+ case NumberOperationHint::kNumberOrOddball:
+ return &cache_.kSpeculativeToNumberNumberOrOddballOperator;
+ }
}
- UNREACHABLE();
+ return new (zone()) Operator1<NumberOperationParameters>(
+ IrOpcode::kSpeculativeToNumber, Operator::kFoldable | Operator::kNoThrow,
+ "SpeculativeToNumber", 1, 1, 1, 1, 1, 0,
+ NumberOperationParameters(hint, feedback));
}
const Operator* SimplifiedOperatorBuilder::EnsureWritableFastElements() {
@@ -1378,6 +1446,28 @@ const Operator* SimplifiedOperatorBuilder::AllocateRaw(
"AllocateRaw", 1, 1, 1, 1, 1, 1, AllocateParameters(type, pretenure));
}
+const Operator* SimplifiedOperatorBuilder::StringCodePointAt(
+ UnicodeEncoding encoding) {
+ switch (encoding) {
+ case UnicodeEncoding::UTF16:
+ return &cache_.kStringCodePointAtOperatorUTF16;
+ case UnicodeEncoding::UTF32:
+ return &cache_.kStringCodePointAtOperatorUTF32;
+ }
+ UNREACHABLE();
+}
+
+const Operator* SimplifiedOperatorBuilder::SeqStringCodePointAt(
+ UnicodeEncoding encoding) {
+ switch (encoding) {
+ case UnicodeEncoding::UTF16:
+ return &cache_.kSeqStringCodePointAtOperatorUTF16;
+ case UnicodeEncoding::UTF32:
+ return &cache_.kSeqStringCodePointAtOperatorUTF32;
+ }
+ UNREACHABLE();
+}
+
const Operator* SimplifiedOperatorBuilder::StringFromCodePoint(
UnicodeEncoding encoding) {
switch (encoding) {
@@ -1463,6 +1553,7 @@ const Operator* SimplifiedOperatorBuilder::TransitionAndStoreNonNumberElement(
}
#undef PURE_OP_LIST
+#undef EFFECT_DEPENDENT_OP_LIST
#undef SPECULATIVE_NUMBER_BINOP_LIST
#undef CHECKED_WITH_FEEDBACK_OP_LIST
#undef CHECKED_OP_LIST
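The builder methods changed above share one shape: when the incoming feedback slot is invalid, a per-hint singleton from the global operator cache is returned, and only a valid feedback slot forces a fresh, zone-allocated parameterized operator. A minimal standalone sketch of that cache-or-allocate shape, using hypothetical Hint/Params/Builder names rather than the V8 classes:

#include <iostream>
#include <memory>
#include <vector>

enum class Hint { kSignedSmall, kNumber };

struct Params {
  Hint hint;
  int feedback;  // -1 mirrors an invalid feedback slot.
};

class Builder {
 public:
  // Returns a shared singleton when no feedback is attached; otherwise
  // allocates a new parameterized instance owned by the builder ("zone").
  const Params* SpeculativeToNumber(Hint hint, int feedback) {
    if (feedback < 0) {
      switch (hint) {
        case Hint::kSignedSmall: return &cached_signed_small_;
        case Hint::kNumber: return &cached_number_;
      }
    }
    owned_.push_back(std::make_unique<Params>(Params{hint, feedback}));
    return owned_.back().get();
  }

 private:
  Params cached_signed_small_{Hint::kSignedSmall, -1};
  Params cached_number_{Hint::kNumber, -1};
  std::vector<std::unique_ptr<Params>> owned_;
};

int main() {
  Builder b;
  const Params* a = b.SpeculativeToNumber(Hint::kNumber, -1);
  const Params* c = b.SpeculativeToNumber(Hint::kNumber, -1);
  std::cout << (a == c) << "\n";                                      // 1: same cached operator
  std::cout << (b.SpeculativeToNumber(Hint::kNumber, 7) == a) << "\n";  // 0: freshly allocated
}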
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 10961cf452..09a1fed476 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -134,8 +134,6 @@ size_t hash_value(CheckTaggedInputMode);
std::ostream& operator<<(std::ostream&, CheckTaggedInputMode);
-CheckTaggedInputMode CheckTaggedInputModeOf(const Operator*);
-
class CheckTaggedInputParameters {
public:
CheckTaggedInputParameters(CheckTaggedInputMode mode,
@@ -353,6 +351,28 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, NumberOperationHint);
NumberOperationHint NumberOperationHintOf(const Operator* op)
WARN_UNUSED_RESULT;
+class NumberOperationParameters {
+ public:
+ NumberOperationParameters(NumberOperationHint hint,
+ const VectorSlotPair& feedback)
+ : hint_(hint), feedback_(feedback) {}
+
+ NumberOperationHint hint() const { return hint_; }
+ const VectorSlotPair& feedback() const { return feedback_; }
+
+ private:
+ NumberOperationHint hint_;
+ VectorSlotPair feedback_;
+};
+
+size_t hash_value(NumberOperationParameters const&);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
+ const NumberOperationParameters&);
+bool operator==(NumberOperationParameters const&,
+ NumberOperationParameters const&);
+const NumberOperationParameters& NumberOperationParametersOf(const Operator* op)
+ WARN_UNUSED_RESULT;
+
int FormalParameterCountOf(const Operator* op) WARN_UNUSED_RESULT;
bool IsRestLengthOf(const Operator* op) WARN_UNUSED_RESULT;
@@ -494,7 +514,6 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* SameValue();
const Operator* TypeOf();
- const Operator* ClassOf();
const Operator* ToBoolean();
@@ -504,19 +523,21 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* StringCharAt();
const Operator* StringCharCodeAt();
const Operator* SeqStringCharCodeAt();
- const Operator* StringCodePointAt();
- const Operator* SeqStringCodePointAt();
+ const Operator* StringCodePointAt(UnicodeEncoding encoding);
+ const Operator* SeqStringCodePointAt(UnicodeEncoding encoding);
const Operator* StringFromCharCode();
const Operator* StringFromCodePoint(UnicodeEncoding encoding);
const Operator* StringIndexOf();
const Operator* StringLength();
const Operator* StringToLowerCaseIntl();
const Operator* StringToUpperCaseIntl();
+ const Operator* StringSubstring();
const Operator* FindOrderedHashMapEntry();
const Operator* FindOrderedHashMapEntryForInt32Key();
- const Operator* SpeculativeToNumber(NumberOperationHint hint);
+ const Operator* SpeculativeToNumber(NumberOperationHint hint,
+ const VectorSlotPair& feedback);
const Operator* StringToNumber();
const Operator* PlainPrimitiveToNumber();
@@ -570,7 +591,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* CheckedInt32Sub();
const Operator* CheckedInt32ToTaggedSigned(const VectorSlotPair& feedback);
const Operator* CheckedTaggedSignedToInt32(const VectorSlotPair& feedback);
- const Operator* CheckedTaggedToFloat64(CheckTaggedInputMode);
+ const Operator* CheckedTaggedToFloat64(CheckTaggedInputMode,
+ const VectorSlotPair& feedback);
const Operator* CheckedTaggedToInt32(CheckForMinusZeroMode,
const VectorSlotPair& feedback);
const Operator* CheckedTaggedToTaggedPointer(const VectorSlotPair& feedback);
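NumberOperationParameters above is a small value class: it bundles the speculation hint with its feedback slot and picks up ==, hash_value and stream output so it can serve as the parameter of an Operator1 and be compared, hashed and printed like any other operator parameter. A self-contained sketch of that value-object shape, with illustrative names only (an int stands in for VectorSlotPair):

#include <cstddef>
#include <functional>
#include <iostream>

enum class Hint { kSignedSmall, kNumber };

class NumberOpParams {
 public:
  NumberOpParams(Hint hint, int feedback_slot)
      : hint_(hint), feedback_slot_(feedback_slot) {}
  Hint hint() const { return hint_; }
  int feedback_slot() const { return feedback_slot_; }

 private:
  Hint hint_;
  int feedback_slot_;
};

bool operator==(const NumberOpParams& a, const NumberOpParams& b) {
  return a.hint() == b.hint() && a.feedback_slot() == b.feedback_slot();
}

size_t hash_value(const NumberOpParams& p) {
  // Simple hash combine, standing in for base::hash_combine.
  size_t h = std::hash<int>()(static_cast<int>(p.hint()));
  return h * 31 + std::hash<int>()(p.feedback_slot());
}

std::ostream& operator<<(std::ostream& os, const NumberOpParams& p) {
  return os << static_cast<int>(p.hint()) << " " << p.feedback_slot();
}

int main() {
  NumberOpParams a(Hint::kNumber, 3), b(Hint::kNumber, 3);
  std::cout << (a == b) << " " << hash_value(a) << " " << a << "\n";
}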
diff --git a/deps/v8/src/compiler/store-store-elimination.cc b/deps/v8/src/compiler/store-store-elimination.cc
index 672acb203d..fac466c36a 100644
--- a/deps/v8/src/compiler/store-store-elimination.cc
+++ b/deps/v8/src/compiler/store-store-elimination.cc
@@ -27,12 +27,11 @@ namespace compiler {
// expression will be evaluated at runtime. If it evaluates to false, then an
// error message will be shown containing the condition, as well as the extra
// info formatted like with printf.
-#define CHECK_EXTRA(condition, fmt, ...) \
- do { \
- if (V8_UNLIKELY(!(condition))) { \
- V8_Fatal(__FILE__, __LINE__, "Check failed: %s. Extra info: " fmt, \
- #condition, ##__VA_ARGS__); \
- } \
+#define CHECK_EXTRA(condition, fmt, ...) \
+ do { \
+ if (V8_UNLIKELY(!(condition))) { \
+ FATAL("Check failed: %s. Extra info: " fmt, #condition, ##__VA_ARGS__); \
+ } \
} while (0)
#ifdef DEBUG
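CHECK_EXTRA keeps the usual do { ... } while (0) assertion-macro shape; the change above only reroutes the failure path through FATAL, which supplies file and line itself, instead of calling V8_Fatal with __FILE__ and __LINE__ explicitly. A freestanding approximation, where FATAL is a stand-in for the real V8 macro:

#include <cstdio>
#include <cstdlib>

// Stand-in for V8's FATAL(): print the formatted message and abort.
#define FATAL(fmt, ...)                                  \
  do {                                                   \
    std::fprintf(stderr, fmt "\n", ##__VA_ARGS__);       \
    std::abort();                                        \
  } while (0)

// Same structure as CHECK_EXTRA above: evaluate the condition at runtime and
// fail with the stringified condition plus printf-style extra info.
#define CHECK_EXTRA(condition, fmt, ...)                                      \
  do {                                                                        \
    if (!(condition)) {                                                       \
      FATAL("Check failed: %s. Extra info: " fmt, #condition, ##__VA_ARGS__); \
    }                                                                         \
  } while (0)

int main(int argc, char**) {
  CHECK_EXTRA(argc > 0, "argc was %d", argc);  // passes, program continues
  return 0;
}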
diff --git a/deps/v8/src/compiler/type-cache.h b/deps/v8/src/compiler/type-cache.h
index 428688abde..ba82536d3d 100644
--- a/deps/v8/src/compiler/type-cache.h
+++ b/deps/v8/src/compiler/type-cache.h
@@ -36,6 +36,8 @@ class TypeCache final {
Type* const kUint32 = Type::Unsigned32();
Type* const kFloat32 = Type::Number();
Type* const kFloat64 = Type::Number();
+ Type* const kBigInt64 = Type::BigInt();
+ Type* const kBigUint64 = Type::BigInt();
Type* const kHoleySmi =
Type::Union(Type::SignedSmall(), Type::Hole(), zone());
@@ -95,8 +97,8 @@ class TypeCache final {
// [0, kMaxUInt32].
Type* const kJSArrayLengthType = Type::Unsigned32();
- // The JSTyped::length property always contains a tagged number in the range
- // [0, kMaxSmiValue].
+ // The JSTypedArray::length property always contains a tagged number in the
+ // range [0, kMaxSmiValue].
Type* const kJSTypedArrayLengthType = Type::UnsignedSmall();
// The String::length property always contains a smi in the range
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 12c9a194b8..418fc17859 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -1150,10 +1150,6 @@ Type* Typer::Visitor::TypeJSNegate(Node* node) {
return TypeUnaryOp(node, Negate);
}
-Type* Typer::Visitor::TypeClassOf(Node* node) {
- return Type::InternalizedStringOrNull();
-}
-
Type* Typer::Visitor::TypeTypeOf(Node* node) {
return Type::InternalizedString();
}
@@ -1229,10 +1225,18 @@ Type* Typer::Visitor::TypeJSCreateIterResultObject(Node* node) {
return Type::OtherObject();
}
+Type* Typer::Visitor::TypeJSCreateStringIterator(Node* node) {
+ return Type::OtherObject();
+}
+
Type* Typer::Visitor::TypeJSCreateKeyValueArray(Node* node) {
return Type::OtherObject();
}
+Type* Typer::Visitor::TypeJSCreatePromise(Node* node) {
+ return Type::OtherObject();
+}
+
Type* Typer::Visitor::TypeJSCreateLiteralArray(Node* node) {
return Type::Array();
}
@@ -1576,8 +1580,8 @@ Type* Typer::Visitor::JSCallTyper(Type* fun, Typer* t) {
case kStringToString:
case kStringToUpperCase:
case kStringTrim:
- case kStringTrimLeft:
- case kStringTrimRight:
+ case kStringTrimEnd:
+ case kStringTrimStart:
case kStringValueOf:
return Type::String();
@@ -1768,8 +1772,6 @@ Type* Typer::Visitor::TypeJSCallRuntime(Node* node) {
return TypeUnaryOp(node, ToObject);
case Runtime::kInlineToString:
return TypeUnaryOp(node, ToString);
- case Runtime::kInlineClassOf:
- return Type::InternalizedStringOrNull();
case Runtime::kHasInPrototypeChain:
return Type::Boolean();
default:
@@ -1822,6 +1824,10 @@ Type* Typer::Visitor::TypeJSGeneratorRestoreContinuation(Node* node) {
return Type::SignedSmall();
}
+Type* Typer::Visitor::TypeJSGeneratorRestoreContext(Node* node) {
+ return Type::Any();
+}
+
Type* Typer::Visitor::TypeJSGeneratorRestoreRegister(Node* node) {
return Type::Any();
}
@@ -1834,6 +1840,26 @@ Type* Typer::Visitor::TypeJSStackCheck(Node* node) { return Type::Any(); }
Type* Typer::Visitor::TypeJSDebugger(Node* node) { return Type::Any(); }
+Type* Typer::Visitor::TypeJSFulfillPromise(Node* node) {
+ return Type::Undefined();
+}
+
+Type* Typer::Visitor::TypeJSPerformPromiseThen(Node* node) {
+ return Type::Receiver();
+}
+
+Type* Typer::Visitor::TypeJSPromiseResolve(Node* node) {
+ return Type::Receiver();
+}
+
+Type* Typer::Visitor::TypeJSRejectPromise(Node* node) {
+ return Type::Undefined();
+}
+
+Type* Typer::Visitor::TypeJSResolvePromise(Node* node) {
+ return Type::Undefined();
+}
+
// Simplified operators.
Type* Typer::Visitor::TypeBooleanNot(Node* node) { return Type::Boolean(); }
@@ -1936,9 +1962,13 @@ Type* Typer::Visitor::StringFromCodePointTyper(Type* type, Typer* t) {
Type* Typer::Visitor::TypeStringCharAt(Node* node) { return Type::String(); }
-Type* Typer::Visitor::TypeStringToLowerCaseIntl(Node* node) { UNREACHABLE(); }
+Type* Typer::Visitor::TypeStringToLowerCaseIntl(Node* node) {
+ return Type::String();
+}
-Type* Typer::Visitor::TypeStringToUpperCaseIntl(Node* node) { UNREACHABLE(); }
+Type* Typer::Visitor::TypeStringToUpperCaseIntl(Node* node) {
+ return Type::String();
+}
Type* Typer::Visitor::TypeStringCharCodeAt(Node* node) {
return typer_->cache_.kUint16;
@@ -1972,6 +2002,8 @@ Type* Typer::Visitor::TypeStringLength(Node* node) {
return typer_->cache_.kStringLengthType;
}
+Type* Typer::Visitor::TypeStringSubstring(Node* node) { return Type::String(); }
+
Type* Typer::Visitor::TypeMaskIndexWithBound(Node* node) {
return Type::Union(Operand(node, 0), typer_->cache_.kSingletonZero, zone());
}
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index a3e90d579a..3e3dbbe769 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -273,9 +273,11 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case BYTECODE_ARRAY_TYPE:
case DESCRIPTOR_ARRAY_TYPE:
case TRANSITION_ARRAY_TYPE:
+ case FEEDBACK_CELL_TYPE:
case FEEDBACK_VECTOR_TYPE:
case PROPERTY_ARRAY_TYPE:
case FOREIGN_TYPE:
+ case SCOPE_INFO_TYPE:
case SCRIPT_TYPE:
case CODE_TYPE:
case PROPERTY_CELL_TYPE:
@@ -299,8 +301,8 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case OBJECT_TEMPLATE_INFO_TYPE:
case ALLOCATION_MEMENTO_TYPE:
case ALIASED_ARGUMENTS_ENTRY_TYPE:
- case PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE:
- case PROMISE_REACTION_JOB_INFO_TYPE:
+ case PROMISE_CAPABILITY_TYPE:
+ case PROMISE_REACTION_TYPE:
case DEBUG_INFO_TYPE:
case STACK_FRAME_INFO_TYPE:
case WEAK_CELL_TYPE:
@@ -314,6 +316,11 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case CONTEXT_EXTENSION_TYPE:
case ASYNC_GENERATOR_REQUEST_TYPE:
case CODE_DATA_CONTAINER_TYPE:
+ case CALLBACK_TASK_TYPE:
+ case CALLABLE_TASK_TYPE:
+ case PROMISE_FULFILL_REACTION_JOB_TASK_TYPE:
+ case PROMISE_REJECT_REACTION_JOB_TASK_TYPE:
+ case PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE:
UNREACHABLE();
}
UNREACHABLE();
@@ -610,11 +617,6 @@ bool UnionType::Wellformed() {
// -----------------------------------------------------------------------------
// Union and intersection
-static bool AddIsSafe(int x, int y) {
- return x >= 0 ? y <= std::numeric_limits<int>::max() - x
- : y >= std::numeric_limits<int>::min() - x;
-}
-
Type* Type::Intersect(Type* type1, Type* type2, Zone* zone) {
// Fast case: bit sets.
if (type1->IsBitset() && type2->IsBitset()) {
@@ -642,10 +644,9 @@ Type* Type::Intersect(Type* type1, Type* type2, Zone* zone) {
bitset bits = type1->BitsetGlb() & type2->BitsetGlb();
int size1 = type1->IsUnion() ? type1->AsUnion()->Length() : 1;
int size2 = type2->IsUnion() ? type2->AsUnion()->Length() : 1;
- if (!AddIsSafe(size1, size2)) return Any();
- int size = size1 + size2;
- if (!AddIsSafe(size, 2)) return Any();
- size += 2;
+ int size;
+ if (base::bits::SignedAddOverflow32(size1, size2, &size)) return Any();
+ if (base::bits::SignedAddOverflow32(size, 2, &size)) return Any();
Type* result_type = UnionType::New(size, zone);
UnionType* result = result_type->AsUnion();
size = 0;
@@ -844,10 +845,9 @@ Type* Type::Union(Type* type1, Type* type2, Zone* zone) {
// Slow case: create union.
int size1 = type1->IsUnion() ? type1->AsUnion()->Length() : 1;
int size2 = type2->IsUnion() ? type2->AsUnion()->Length() : 1;
- if (!AddIsSafe(size1, size2)) return Any();
- int size = size1 + size2;
- if (!AddIsSafe(size, 2)) return Any();
- size += 2;
+ int size;
+ if (base::bits::SignedAddOverflow32(size1, size2, &size)) return Any();
+ if (base::bits::SignedAddOverflow32(size, 2, &size)) return Any();
Type* result_type = UnionType::New(size, zone);
UnionType* result = result_type->AsUnion();
size = 0;
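Both Type::Intersect and Type::Union above drop the local AddIsSafe predicate and let base::bits::SignedAddOverflow32 report overflow through its return value while writing the sum through a pointer, bailing out to Any when the union length would overflow. A portable sketch of such a helper and the bail-out usage (V8's own implementation may differ in detail):

#include <cstdint>
#include <iostream>
#include <limits>

// Returns true if lhs + rhs overflows int32_t; otherwise stores the sum in *val.
bool SignedAddOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
#if defined(__GNUC__) || defined(__clang__)
  return __builtin_add_overflow(lhs, rhs, val);
#else
  // Limits-based fallback, mirroring the removed AddIsSafe logic.
  if (rhs >= 0 ? lhs > std::numeric_limits<int32_t>::max() - rhs
               : lhs < std::numeric_limits<int32_t>::min() - rhs) {
    return true;
  }
  *val = lhs + rhs;
  return false;
#endif
}

int main() {
  int32_t size;
  int32_t size1 = 3, size2 = 4;
  // Mirrors the union-size computation: give up (return Any) on overflow.
  if (SignedAddOverflow32(size1, size2, &size)) return 1;
  if (SignedAddOverflow32(size, 2, &size)) return 1;
  std::cout << size << "\n";  // 9
}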
diff --git a/deps/v8/src/compiler/unwinding-info-writer.h b/deps/v8/src/compiler/unwinding-info-writer.h
index 86f5e9e800..723b6f9ec2 100644
--- a/deps/v8/src/compiler/unwinding-info-writer.h
+++ b/deps/v8/src/compiler/unwinding-info-writer.h
@@ -52,4 +52,4 @@ class UnwindingInfoWriter {
#endif
-#endif
+#endif // V8_COMPILER_UNWINDING_INFO_WRITER_H_
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index a66a73f5d3..e9a5be6f65 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -191,20 +191,18 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
}
}
if (discovered_if_success && !discovered_if_exception) {
- V8_Fatal(__FILE__, __LINE__,
- "#%d:%s should be followed by IfSuccess/IfException, but is "
- "only followed by single #%d:%s",
- node->id(), node->op()->mnemonic(),
- discovered_if_success->id(),
- discovered_if_success->op()->mnemonic());
+ FATAL(
+ "#%d:%s should be followed by IfSuccess/IfException, but is "
+ "only followed by single #%d:%s",
+ node->id(), node->op()->mnemonic(), discovered_if_success->id(),
+ discovered_if_success->op()->mnemonic());
}
if (discovered_if_exception && !discovered_if_success) {
- V8_Fatal(__FILE__, __LINE__,
- "#%d:%s should be followed by IfSuccess/IfException, but is "
- "only followed by single #%d:%s",
- node->id(), node->op()->mnemonic(),
- discovered_if_exception->id(),
- discovered_if_exception->op()->mnemonic());
+ FATAL(
+ "#%d:%s should be followed by IfSuccess/IfException, but is "
+ "only followed by single #%d:%s",
+ node->id(), node->op()->mnemonic(), discovered_if_exception->id(),
+ discovered_if_exception->op()->mnemonic());
}
if (discovered_if_success || discovered_if_exception) {
CHECK_EQ(2, total_number_of_control_uses);
@@ -268,12 +266,13 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
break;
}
case IrOpcode::kIfTrue:
- case IrOpcode::kIfFalse:
- CHECK_EQ(IrOpcode::kBranch,
- NodeProperties::GetControlInput(node, 0)->opcode());
+ case IrOpcode::kIfFalse: {
+ Node* control = NodeProperties::GetControlInput(node, 0);
+ CHECK_EQ(IrOpcode::kBranch, control->opcode());
// Type is empty.
CheckNotTyped(node);
break;
+ }
case IrOpcode::kIfSuccess: {
// IfSuccess and IfException continuation only on throwing nodes.
Node* input = NodeProperties::GetControlInput(node, 0);
@@ -311,8 +310,8 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
break;
}
default: {
- V8_Fatal(__FILE__, __LINE__, "Switch #%d illegally used by #%d:%s",
- node->id(), use->id(), use->op()->mnemonic());
+ FATAL("Switch #%d illegally used by #%d:%s", node->id(), use->id(),
+ use->op()->mnemonic());
break;
}
}
@@ -572,6 +571,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
}
case IrOpcode::kObjectId:
CheckTypeIs(node, Type::Object());
+ break;
case IrOpcode::kStateValues:
case IrOpcode::kTypedStateValues:
case IrOpcode::kArgumentsElementsState:
@@ -677,10 +677,18 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// Type is OtherObject.
CheckTypeIs(node, Type::OtherObject());
break;
+ case IrOpcode::kJSCreateStringIterator:
+ // Type is OtherObject.
+ CheckTypeIs(node, Type::OtherObject());
+ break;
case IrOpcode::kJSCreateKeyValueArray:
// Type is OtherObject.
CheckTypeIs(node, Type::OtherObject());
break;
+ case IrOpcode::kJSCreatePromise:
+ // Type is OtherObject.
+ CheckTypeIs(node, Type::OtherObject());
+ break;
case IrOpcode::kJSCreateLiteralArray:
// Type is Array.
CheckTypeIs(node, Type::Array());
@@ -742,10 +750,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// Type is Boolean.
CheckTypeIs(node, Type::Boolean());
break;
- case IrOpcode::kClassOf:
- // Type is InternalizedString \/ Null.
- CheckTypeIs(node, Type::InternalizedStringOrNull());
- break;
case IrOpcode::kTypeOf:
// Type is InternalizedString.
CheckTypeIs(node, Type::InternalizedString());
@@ -831,6 +835,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckTypeIs(node, Type::SignedSmall());
break;
+ case IrOpcode::kJSGeneratorRestoreContext:
+ CheckTypeIs(node, Type::Any());
+ break;
+
case IrOpcode::kJSGeneratorRestoreRegister:
CheckTypeIs(node, Type::Any());
break;
@@ -845,6 +853,35 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckNotTyped(node);
break;
+ case IrOpcode::kJSFulfillPromise:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckValueInputIs(node, 1, Type::Any());
+ CheckTypeIs(node, Type::Undefined());
+ break;
+ case IrOpcode::kJSPerformPromiseThen:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckValueInputIs(node, 1, Type::Any());
+ CheckValueInputIs(node, 2, Type::Any());
+ CheckValueInputIs(node, 3, Type::Any());
+ CheckTypeIs(node, Type::Receiver());
+ break;
+ case IrOpcode::kJSPromiseResolve:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckValueInputIs(node, 1, Type::Any());
+ CheckTypeIs(node, Type::Receiver());
+ break;
+ case IrOpcode::kJSRejectPromise:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckValueInputIs(node, 1, Type::Any());
+ CheckValueInputIs(node, 2, Type::Any());
+ CheckTypeIs(node, Type::Undefined());
+ break;
+ case IrOpcode::kJSResolvePromise:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckValueInputIs(node, 1, Type::Any());
+ CheckTypeIs(node, Type::Undefined());
+ break;
+
case IrOpcode::kComment:
case IrOpcode::kDebugAbort:
case IrOpcode::kDebugBreak:
@@ -1097,7 +1134,12 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 0, Type::String());
CheckTypeIs(node, Type::String());
break;
-
+ case IrOpcode::kStringSubstring:
+ CheckValueInputIs(node, 0, Type::String());
+ CheckValueInputIs(node, 1, Type::SignedSmall());
+ CheckValueInputIs(node, 2, Type::SignedSmall());
+ CheckTypeIs(node, Type::String());
+ break;
case IrOpcode::kReferenceEqual:
// (Unique, Any) -> Boolean and
// (Any, Unique) -> Boolean
@@ -1471,6 +1513,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// Machine operators
// -----------------------
case IrOpcode::kLoad:
+ case IrOpcode::kPoisonedLoad:
case IrOpcode::kProtectedLoad:
case IrOpcode::kProtectedStore:
case IrOpcode::kStore:
@@ -1630,21 +1673,27 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kWord32PairShl:
case IrOpcode::kWord32PairShr:
case IrOpcode::kWord32PairSar:
+ case IrOpcode::kSpeculationPoison:
case IrOpcode::kLoadStackPointer:
case IrOpcode::kLoadFramePointer:
case IrOpcode::kLoadParentFramePointer:
case IrOpcode::kUnalignedLoad:
case IrOpcode::kUnalignedStore:
- case IrOpcode::kAtomicLoad:
- case IrOpcode::kAtomicStore:
- case IrOpcode::kAtomicExchange:
- case IrOpcode::kAtomicCompareExchange:
- case IrOpcode::kAtomicAdd:
- case IrOpcode::kAtomicSub:
- case IrOpcode::kAtomicAnd:
- case IrOpcode::kAtomicOr:
- case IrOpcode::kAtomicXor:
+ case IrOpcode::kWord32AtomicLoad:
+ case IrOpcode::kWord32AtomicStore:
+ case IrOpcode::kWord32AtomicExchange:
+ case IrOpcode::kWord32AtomicCompareExchange:
+ case IrOpcode::kWord32AtomicAdd:
+ case IrOpcode::kWord32AtomicSub:
+ case IrOpcode::kWord32AtomicAnd:
+ case IrOpcode::kWord32AtomicOr:
+ case IrOpcode::kWord32AtomicXor:
case IrOpcode::kSpeculationFence:
+ case IrOpcode::kSignExtendWord8ToInt32:
+ case IrOpcode::kSignExtendWord16ToInt32:
+ case IrOpcode::kSignExtendWord8ToInt64:
+ case IrOpcode::kSignExtendWord16ToInt64:
+ case IrOpcode::kSignExtendWord32ToInt64:
#define SIMD_MACHINE_OP_CASE(Name) case IrOpcode::k##Name:
MACHINE_SIMD_OP_LIST(SIMD_MACHINE_OP_CASE)
@@ -1673,9 +1722,8 @@ void Verifier::Run(Graph* graph, Typing typing, CheckInputs check_inputs,
other->opcode() == IrOpcode::kProjection &&
other->InputAt(0) == node &&
ProjectionIndexOf(other->op()) == ProjectionIndexOf(proj->op())) {
- V8_Fatal(__FILE__, __LINE__,
- "Node #%d:%s has duplicate projections #%d and #%d",
- node->id(), node->op()->mnemonic(), proj->id(), other->id());
+ FATAL("Node #%d:%s has duplicate projections #%d and #%d", node->id(),
+ node->op()->mnemonic(), proj->id(), other->id());
}
}
}
@@ -1726,10 +1774,9 @@ static void CheckInputsDominate(Schedule* schedule, BasicBlock* block,
Node* input = node->InputAt(j);
if (!HasDominatingDef(schedule, node->InputAt(j), block, use_block,
use_pos)) {
- V8_Fatal(__FILE__, __LINE__,
- "Node #%d:%s in B%d is not dominated by input@%d #%d:%s",
- node->id(), node->op()->mnemonic(), block->rpo_number(), j,
- input->id(), input->op()->mnemonic());
+ FATAL("Node #%d:%s in B%d is not dominated by input@%d #%d:%s",
+ node->id(), node->op()->mnemonic(), block->rpo_number(), j,
+ input->id(), input->op()->mnemonic());
}
}
// Ensure that nodes are dominated by their control inputs;
@@ -1739,10 +1786,9 @@ static void CheckInputsDominate(Schedule* schedule, BasicBlock* block,
node->opcode() != IrOpcode::kEnd) {
Node* ctl = NodeProperties::GetControlInput(node);
if (!Dominates(schedule, ctl, node)) {
- V8_Fatal(__FILE__, __LINE__,
- "Node #%d:%s in B%d is not dominated by control input #%d:%s",
- node->id(), node->op()->mnemonic(), block->rpo_number(),
- ctl->id(), ctl->op()->mnemonic());
+ FATAL("Node #%d:%s in B%d is not dominated by control input #%d:%s",
+ node->id(), node->op()->mnemonic(), block->rpo_number(), ctl->id(),
+ ctl->op()->mnemonic());
}
}
}
@@ -1835,8 +1881,8 @@ void ScheduleVerifier::Run(Schedule* schedule) {
BitVector* block_doms = dominators[block->id().ToSize()];
BasicBlock* idom = block->dominator();
if (idom != nullptr && !block_doms->Contains(idom->id().ToInt())) {
- V8_Fatal(__FILE__, __LINE__, "Block B%d is not dominated by B%d",
- block->rpo_number(), idom->rpo_number());
+ FATAL("Block B%d is not dominated by B%d", block->rpo_number(),
+ idom->rpo_number());
}
for (size_t s = 0; s < block->SuccessorCount(); s++) {
BasicBlock* succ = block->SuccessorAt(s);
@@ -1872,9 +1918,8 @@ void ScheduleVerifier::Run(Schedule* schedule) {
schedule->GetBlockById(BasicBlock::Id::FromInt(it.Current()));
if (dom != idom &&
!dominators[idom->id().ToSize()]->Contains(dom->id().ToInt())) {
- V8_Fatal(__FILE__, __LINE__,
- "Block B%d is not immediately dominated by B%d",
- block->rpo_number(), idom->rpo_number());
+ FATAL("Block B%d is not immediately dominated by B%d",
+ block->rpo_number(), idom->rpo_number());
}
}
}
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index 9bbf5f3a3f..080479a010 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -10,6 +10,7 @@
#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/base/platform/platform.h"
+#include "src/base/v8-fallthrough.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/compiler/access-builder.h"
@@ -53,9 +54,9 @@ namespace compiler {
#define WASM_64 0
#endif
-#define FATAL_UNSUPPORTED_OPCODE(opcode) \
- V8_Fatal(__FILE__, __LINE__, "Unsupported opcode #%d:%s", (opcode), \
- wasm::WasmOpcodes::OpcodeName(opcode));
+#define FATAL_UNSUPPORTED_OPCODE(opcode) \
+ FATAL("Unsupported opcode 0x%x:%s", (opcode), \
+ wasm::WasmOpcodes::OpcodeName(opcode));
namespace {
@@ -227,14 +228,14 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position,
Handle<Code> code = BUILTIN_CODE(jsgraph()->isolate(), WasmStackGuard);
CallInterfaceDescriptor idesc =
WasmRuntimeCallDescriptor(jsgraph()->isolate());
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
jsgraph()->isolate(), jsgraph()->zone(), idesc, 0,
CallDescriptor::kNoFlags, Operator::kNoProperties,
MachineType::AnyTagged(), 1, Linkage::kNoContext);
Node* stub_code = jsgraph()->HeapConstant(code);
- Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), stub_code,
- *effect, stack_check.if_false);
+ Node* call = graph()->NewNode(jsgraph()->common()->Call(call_descriptor),
+ stub_code, *effect, stack_check.if_false);
SetSourcePosition(call, position);
@@ -563,16 +564,15 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
case wasm::kExprF64Sqrt:
op = m->Float64Sqrt();
break;
+ case wasm::kExprI32SConvertF32:
+ case wasm::kExprI32UConvertF32:
case wasm::kExprI32SConvertF64:
- return BuildI32SConvertF64(input, position, NumericImplementation::kTrap);
- case wasm::kExprI32SConvertSatF64:
- return BuildI32SConvertF64(input, position,
- NumericImplementation::kSaturate);
case wasm::kExprI32UConvertF64:
- return BuildI32UConvertF64(input, position, NumericImplementation::kTrap);
+ case wasm::kExprI32SConvertSatF64:
case wasm::kExprI32UConvertSatF64:
- return BuildI32UConvertF64(input, position,
- NumericImplementation::kSaturate);
+ case wasm::kExprI32SConvertSatF32:
+ case wasm::kExprI32UConvertSatF32:
+ return BuildIntConvertFloat(input, position, opcode);
case wasm::kExprI32AsmjsSConvertF64:
return BuildI32AsmjsSConvertF64(input);
case wasm::kExprI32AsmjsUConvertF64:
@@ -592,16 +592,6 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
case wasm::kExprF32UConvertI32:
op = m->RoundUint32ToFloat32();
break;
- case wasm::kExprI32SConvertF32:
- return BuildI32SConvertF32(input, position, NumericImplementation::kTrap);
- case wasm::kExprI32SConvertSatF32:
- return BuildI32SConvertF32(input, position,
- NumericImplementation::kSaturate);
- case wasm::kExprI32UConvertF32:
- return BuildI32UConvertF32(input, position, NumericImplementation::kTrap);
- case wasm::kExprI32UConvertSatF32:
- return BuildI32UConvertF32(input, position,
- NumericImplementation::kSaturate);
case wasm::kExprI32AsmjsSConvertF32:
return BuildI32AsmjsSConvertF32(input);
case wasm::kExprI32AsmjsUConvertF32:
@@ -780,14 +770,32 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
}
op = m->RoundUint64ToFloat64();
break;
+ case wasm::kExprI32SExtendI8:
+ op = m->SignExtendWord8ToInt32();
+ break;
+ case wasm::kExprI32SExtendI16:
+ op = m->SignExtendWord16ToInt32();
+ break;
+ case wasm::kExprI64SExtendI8:
+ op = m->SignExtendWord8ToInt64();
+ break;
+ case wasm::kExprI64SExtendI16:
+ op = m->SignExtendWord16ToInt64();
+ break;
+ case wasm::kExprI64SExtendI32:
+ op = m->SignExtendWord32ToInt64();
+ break;
case wasm::kExprI64SConvertF32:
- return BuildI64SConvertF32(input, position);
- case wasm::kExprI64SConvertF64:
- return BuildI64SConvertF64(input, position);
case wasm::kExprI64UConvertF32:
- return BuildI64UConvertF32(input, position);
+ case wasm::kExprI64SConvertF64:
case wasm::kExprI64UConvertF64:
- return BuildI64UConvertF64(input, position);
+ case wasm::kExprI64SConvertSatF32:
+ case wasm::kExprI64UConvertSatF32:
+ case wasm::kExprI64SConvertSatF64:
+ case wasm::kExprI64UConvertSatF64:
+ return jsgraph()->machine()->Is32()
+ ? BuildCcallConvertFloat(input, position, opcode)
+ : BuildIntConvertFloat(input, position, opcode);
case wasm::kExprI32AsmjsLoadMem8S:
return BuildAsmjsLoadMem(MachineType::Int8(), input);
case wasm::kExprI32AsmjsLoadMem8U:
@@ -1037,12 +1045,14 @@ Node* WasmGraphBuilder::BuildChangeEndiannessStore(
case wasm::kWasmF64:
value = graph()->NewNode(m->BitcastFloat64ToInt64(), node);
isFloat = true;
+ V8_FALLTHROUGH;
case wasm::kWasmI64:
result = jsgraph()->Int64Constant(0);
break;
case wasm::kWasmF32:
value = graph()->NewNode(m->BitcastFloat32ToInt32(), node);
isFloat = true;
+ V8_FALLTHROUGH;
case wasm::kWasmI32:
result = jsgraph()->Int32Constant(0);
break;
@@ -1184,12 +1194,14 @@ Node* WasmGraphBuilder::BuildChangeEndiannessLoad(Node* node,
case MachineRepresentation::kFloat64:
value = graph()->NewNode(m->BitcastFloat64ToInt64(), node);
isFloat = true;
+ V8_FALLTHROUGH;
case MachineRepresentation::kWord64:
result = jsgraph()->Int64Constant(0);
break;
case MachineRepresentation::kFloat32:
value = graph()->NewNode(m->BitcastFloat32ToInt32(), node);
isFloat = true;
+ V8_FALLTHROUGH;
case MachineRepresentation::kWord32:
case MachineRepresentation::kWord16:
result = jsgraph()->Int32Constant(0);
@@ -1373,167 +1385,281 @@ Node* WasmGraphBuilder::BuildF64CopySign(Node* left, Node* right) {
#endif
}
-// Helper classes for float to int conversions.
-struct WasmGraphBuilder::IntConvertOps {
- MachineRepresentation word_rep() const {
- return MachineRepresentation::kWord32;
- }
- Node* zero() const { return builder_->Int32Constant(0); }
- virtual Node* min() const = 0;
- virtual Node* max() const = 0;
- virtual ~IntConvertOps() = default;
-
- protected:
- explicit IntConvertOps(WasmGraphBuilder* builder) : builder_(builder) {}
- WasmGraphBuilder* builder_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(IntConvertOps);
-};
-
-struct I32SConvertOps final : public WasmGraphBuilder::IntConvertOps {
- explicit I32SConvertOps(WasmGraphBuilder* builder)
- : WasmGraphBuilder::IntConvertOps(builder) {}
- ~I32SConvertOps() = default;
- Node* min() const {
- return builder_->Int32Constant(std::numeric_limits<int32_t>::min());
- }
- Node* max() const {
- return builder_->Int32Constant(std::numeric_limits<int32_t>::max());
- }
- DISALLOW_IMPLICIT_CONSTRUCTORS(I32SConvertOps);
-};
-
-struct I32UConvertOps final : public WasmGraphBuilder::IntConvertOps {
- explicit I32UConvertOps(WasmGraphBuilder* builder)
- : WasmGraphBuilder::IntConvertOps(builder) {}
- ~I32UConvertOps() = default;
- Node* min() const {
- return builder_->Int32Constant(std::numeric_limits<uint32_t>::min());
- }
- Node* max() const {
- return builder_->Int32Constant(std::numeric_limits<uint32_t>::max());
- }
- DISALLOW_IMPLICIT_CONSTRUCTORS(I32UConvertOps);
-};
-
-struct WasmGraphBuilder::FloatConvertOps {
- virtual Node* zero() const = 0;
- virtual wasm::WasmOpcode trunc_op() const = 0;
- virtual wasm::WasmOpcode ne_op() const = 0;
- virtual wasm::WasmOpcode lt_op() const = 0;
- virtual ~FloatConvertOps() = default;
-
- protected:
- explicit FloatConvertOps(WasmGraphBuilder* builder) : builder_(builder) {}
- WasmGraphBuilder* builder_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(FloatConvertOps);
-};
-
-struct F32ConvertOps final : public WasmGraphBuilder::FloatConvertOps {
- explicit F32ConvertOps(WasmGraphBuilder* builder)
- : WasmGraphBuilder::FloatConvertOps(builder) {}
- ~F32ConvertOps() = default;
- Node* zero() const { return builder_->Float32Constant(0.0); }
- wasm::WasmOpcode trunc_op() const { return wasm::kExprF32Trunc; }
- wasm::WasmOpcode ne_op() const { return wasm::kExprF32Ne; }
- wasm::WasmOpcode lt_op() const { return wasm::kExprF32Lt; }
- DISALLOW_IMPLICIT_CONSTRUCTORS(F32ConvertOps);
-};
-
-struct F64ConvertOps final : public WasmGraphBuilder::FloatConvertOps {
- explicit F64ConvertOps(WasmGraphBuilder* builder)
- : WasmGraphBuilder::FloatConvertOps(builder) {}
- ~F64ConvertOps() = default;
- Node* zero() const { return builder_->Float64Constant(0.0); }
- wasm::WasmOpcode trunc_op() const { return wasm::kExprF64Trunc; }
- wasm::WasmOpcode ne_op() const { return wasm::kExprF64Ne; }
- wasm::WasmOpcode lt_op() const { return wasm::kExprF64Lt; }
- DISALLOW_IMPLICIT_CONSTRUCTORS(F64ConvertOps);
-};
-
-Node* WasmGraphBuilder::BuildConvertCheck(Node* test, Node* result, Node* input,
- wasm::WasmCodePosition position,
- NumericImplementation impl,
- const IntConvertOps* int_ops,
- const FloatConvertOps* float_ops) {
- switch (impl) {
- case NumericImplementation::kTrap:
+namespace {
+
+MachineType IntConvertType(wasm::WasmOpcode opcode) {
+ switch (opcode) {
+ case wasm::kExprI32SConvertF32:
+ case wasm::kExprI32SConvertF64:
+ case wasm::kExprI32SConvertSatF32:
+ case wasm::kExprI32SConvertSatF64:
+ return MachineType::Int32();
+ case wasm::kExprI32UConvertF32:
+ case wasm::kExprI32UConvertF64:
+ case wasm::kExprI32UConvertSatF32:
+ case wasm::kExprI32UConvertSatF64:
+ return MachineType::Uint32();
+ case wasm::kExprI64SConvertF32:
+ case wasm::kExprI64SConvertF64:
+ case wasm::kExprI64SConvertSatF32:
+ case wasm::kExprI64SConvertSatF64:
+ return MachineType::Int64();
+ case wasm::kExprI64UConvertF32:
+ case wasm::kExprI64UConvertF64:
+ case wasm::kExprI64UConvertSatF32:
+ case wasm::kExprI64UConvertSatF64:
+ return MachineType::Uint64();
+ default:
+ UNREACHABLE();
+ }
+}
+
+MachineType FloatConvertType(wasm::WasmOpcode opcode) {
+ switch (opcode) {
+ case wasm::kExprI32SConvertF32:
+ case wasm::kExprI32UConvertF32:
+ case wasm::kExprI32SConvertSatF32:
+ case wasm::kExprI64SConvertF32:
+ case wasm::kExprI64UConvertF32:
+ case wasm::kExprI32UConvertSatF32:
+ case wasm::kExprI64SConvertSatF32:
+ case wasm::kExprI64UConvertSatF32:
+ return MachineType::Float32();
+ case wasm::kExprI32SConvertF64:
+ case wasm::kExprI32UConvertF64:
+ case wasm::kExprI64SConvertF64:
+ case wasm::kExprI64UConvertF64:
+ case wasm::kExprI32SConvertSatF64:
+ case wasm::kExprI32UConvertSatF64:
+ case wasm::kExprI64SConvertSatF64:
+ case wasm::kExprI64UConvertSatF64:
+ return MachineType::Float64();
+ default:
+ UNREACHABLE();
+ }
+}
+
+const Operator* ConvertOp(WasmGraphBuilder* builder, wasm::WasmOpcode opcode) {
+ switch (opcode) {
+ case wasm::kExprI32SConvertF32:
+ case wasm::kExprI32SConvertSatF32:
+ return builder->jsgraph()->machine()->TruncateFloat32ToInt32();
+ case wasm::kExprI32UConvertF32:
+ case wasm::kExprI32UConvertSatF32:
+ return builder->jsgraph()->machine()->TruncateFloat32ToUint32();
+ case wasm::kExprI32SConvertF64:
+ case wasm::kExprI32SConvertSatF64:
+ return builder->jsgraph()->machine()->ChangeFloat64ToInt32();
+ case wasm::kExprI32UConvertF64:
+ case wasm::kExprI32UConvertSatF64:
+ return builder->jsgraph()->machine()->TruncateFloat64ToUint32();
+ case wasm::kExprI64SConvertF32:
+ case wasm::kExprI64SConvertSatF32:
+ return builder->jsgraph()->machine()->TryTruncateFloat32ToInt64();
+ case wasm::kExprI64UConvertF32:
+ case wasm::kExprI64UConvertSatF32:
+ return builder->jsgraph()->machine()->TryTruncateFloat32ToUint64();
+ case wasm::kExprI64SConvertF64:
+ case wasm::kExprI64SConvertSatF64:
+ return builder->jsgraph()->machine()->TryTruncateFloat64ToInt64();
+ case wasm::kExprI64UConvertF64:
+ case wasm::kExprI64UConvertSatF64:
+ return builder->jsgraph()->machine()->TryTruncateFloat64ToUint64();
+ default:
+ UNREACHABLE();
+ }
+}
+
+wasm::WasmOpcode ConvertBackOp(wasm::WasmOpcode opcode) {
+ switch (opcode) {
+ case wasm::kExprI32SConvertF32:
+ case wasm::kExprI32SConvertSatF32:
+ return wasm::kExprF32SConvertI32;
+ case wasm::kExprI32UConvertF32:
+ case wasm::kExprI32UConvertSatF32:
+ return wasm::kExprF32UConvertI32;
+ case wasm::kExprI32SConvertF64:
+ case wasm::kExprI32SConvertSatF64:
+ return wasm::kExprF64SConvertI32;
+ case wasm::kExprI32UConvertF64:
+ case wasm::kExprI32UConvertSatF64:
+ return wasm::kExprF64UConvertI32;
+ default:
+ UNREACHABLE();
+ }
+}
+
+bool IsTrappingConvertOp(wasm::WasmOpcode opcode) {
+ switch (opcode) {
+ case wasm::kExprI32SConvertF32:
+ case wasm::kExprI32UConvertF32:
+ case wasm::kExprI32SConvertF64:
+ case wasm::kExprI32UConvertF64:
+ case wasm::kExprI64SConvertF32:
+ case wasm::kExprI64UConvertF32:
+ case wasm::kExprI64SConvertF64:
+ case wasm::kExprI64UConvertF64:
+ return true;
+ case wasm::kExprI32SConvertSatF64:
+ case wasm::kExprI32UConvertSatF64:
+ case wasm::kExprI32SConvertSatF32:
+ case wasm::kExprI32UConvertSatF32:
+ case wasm::kExprI64SConvertSatF32:
+ case wasm::kExprI64UConvertSatF32:
+ case wasm::kExprI64SConvertSatF64:
+ case wasm::kExprI64UConvertSatF64:
+ return false;
+ default:
+ UNREACHABLE();
+ }
+}
+
+Node* Zero(WasmGraphBuilder* builder, const MachineType& ty) {
+ switch (ty.representation()) {
+ case MachineRepresentation::kWord32:
+ return builder->Int32Constant(0);
+ case MachineRepresentation::kWord64:
+ return builder->Int64Constant(0);
+ case MachineRepresentation::kFloat32:
+ return builder->Float32Constant(0.0);
+ case MachineRepresentation::kFloat64:
+ return builder->Float64Constant(0.0);
+ default:
+ UNREACHABLE();
+ }
+}
+
+Node* Min(WasmGraphBuilder* builder, const MachineType& ty) {
+ switch (ty.semantic()) {
+ case MachineSemantic::kInt32:
+ return builder->Int32Constant(std::numeric_limits<int32_t>::min());
+ case MachineSemantic::kUint32:
+ return builder->Int32Constant(std::numeric_limits<uint32_t>::min());
+ case MachineSemantic::kInt64:
+ return builder->Int64Constant(std::numeric_limits<int64_t>::min());
+ case MachineSemantic::kUint64:
+ return builder->Int64Constant(std::numeric_limits<uint64_t>::min());
+ default:
+ UNREACHABLE();
+ }
+}
+
+Node* Max(WasmGraphBuilder* builder, const MachineType& ty) {
+ switch (ty.semantic()) {
+ case MachineSemantic::kInt32:
+ return builder->Int32Constant(std::numeric_limits<int32_t>::max());
+ case MachineSemantic::kUint32:
+ return builder->Int32Constant(std::numeric_limits<uint32_t>::max());
+ case MachineSemantic::kInt64:
+ return builder->Int64Constant(std::numeric_limits<int64_t>::max());
+ case MachineSemantic::kUint64:
+ return builder->Int64Constant(std::numeric_limits<uint64_t>::max());
+ default:
+ UNREACHABLE();
+ }
+}
+
+wasm::WasmOpcode TruncOp(const MachineType& ty) {
+ switch (ty.representation()) {
+ case MachineRepresentation::kFloat32:
+ return wasm::kExprF32Trunc;
+ case MachineRepresentation::kFloat64:
+ return wasm::kExprF64Trunc;
+ default:
+ UNREACHABLE();
+ }
+}
+
+wasm::WasmOpcode NeOp(const MachineType& ty) {
+ switch (ty.representation()) {
+ case MachineRepresentation::kFloat32:
+ return wasm::kExprF32Ne;
+ case MachineRepresentation::kFloat64:
+ return wasm::kExprF64Ne;
+ default:
+ UNREACHABLE();
+ }
+}
+
+wasm::WasmOpcode LtOp(const MachineType& ty) {
+ switch (ty.representation()) {
+ case MachineRepresentation::kFloat32:
+ return wasm::kExprF32Lt;
+ case MachineRepresentation::kFloat64:
+ return wasm::kExprF64Lt;
+ default:
+ UNREACHABLE();
+ }
+}
+
+Node* ConvertTrapTest(WasmGraphBuilder* builder, wasm::WasmOpcode opcode,
+ const MachineType& int_ty, const MachineType& float_ty,
+ Node* trunc, Node* converted_value) {
+ if (int_ty.representation() == MachineRepresentation::kWord32) {
+ Node* check = builder->Unop(ConvertBackOp(opcode), converted_value);
+ return builder->Binop(NeOp(float_ty), trunc, check);
+ }
+ return builder->graph()->NewNode(builder->jsgraph()->common()->Projection(1),
+ trunc, builder->graph()->start());
+}
+
+Node* ConvertSaturateTest(WasmGraphBuilder* builder, wasm::WasmOpcode opcode,
+ const MachineType& int_ty,
+ const MachineType& float_ty, Node* trunc,
+ Node* converted_value) {
+ Node* test = ConvertTrapTest(builder, opcode, int_ty, float_ty, trunc,
+ converted_value);
+ if (int_ty.representation() == MachineRepresentation::kWord64) {
+ test = builder->Binop(wasm::kExprI64Eq, test, builder->Int64Constant(0));
+ }
+ return test;
+}
+
+} // namespace
+
+Node* WasmGraphBuilder::BuildIntConvertFloat(Node* input,
+ wasm::WasmCodePosition position,
+ wasm::WasmOpcode opcode) {
+ const MachineType int_ty = IntConvertType(opcode);
+ const MachineType float_ty = FloatConvertType(opcode);
+ const Operator* conv_op = ConvertOp(this, opcode);
+ Node* trunc = nullptr;
+ Node* converted_value = nullptr;
+ const bool is_int32 =
+ int_ty.representation() == MachineRepresentation::kWord32;
+ if (is_int32) {
+ trunc = Unop(TruncOp(float_ty), input);
+ converted_value = graph()->NewNode(conv_op, trunc);
+ } else {
+ trunc = graph()->NewNode(conv_op, input);
+ converted_value = graph()->NewNode(jsgraph()->common()->Projection(0),
+ trunc, graph()->start());
+ }
+ if (IsTrappingConvertOp(opcode)) {
+ Node* test =
+ ConvertTrapTest(this, opcode, int_ty, float_ty, trunc, converted_value);
+ if (is_int32) {
TrapIfTrue(wasm::kTrapFloatUnrepresentable, test, position);
- return result;
- case NumericImplementation::kSaturate: {
- Diamond tl_d(graph(), jsgraph()->common(), test, BranchHint::kFalse);
- tl_d.Chain(*control_);
- Diamond nan_d(graph(), jsgraph()->common(),
- Binop(float_ops->ne_op(), input, input), // Checks if NaN.
- BranchHint::kFalse);
- nan_d.Nest(tl_d, true);
- Diamond sat_d(graph(), jsgraph()->common(),
- Binop(float_ops->lt_op(), input, float_ops->zero()),
- BranchHint::kNone);
- sat_d.Nest(nan_d, false);
- Node* sat_val =
- sat_d.Phi(int_ops->word_rep(), int_ops->min(), int_ops->max());
- Node* nan_val = nan_d.Phi(int_ops->word_rep(), int_ops->zero(), sat_val);
- return tl_d.Phi(int_ops->word_rep(), nan_val, result);
+ } else {
+ ZeroCheck64(wasm::kTrapFloatUnrepresentable, test, position);
}
- }
- UNREACHABLE();
-}
-
-Node* WasmGraphBuilder::BuildI32ConvertOp(
- Node* input, wasm::WasmCodePosition position, NumericImplementation impl,
- const Operator* op, wasm::WasmOpcode check_op, const IntConvertOps* int_ops,
- const FloatConvertOps* float_ops) {
- // Truncation of the input value is needed for the overflow check later.
- Node* trunc = Unop(float_ops->trunc_op(), input);
- Node* result = graph()->NewNode(op, trunc);
-
- // Convert the result back to f64. If we end up at a different value than the
- // truncated input value, then there has been an overflow and we
- // trap/saturate.
- Node* check = Unop(check_op, result);
- Node* overflow = Binop(float_ops->ne_op(), trunc, check);
- return BuildConvertCheck(overflow, result, input, position, impl, int_ops,
- float_ops);
-}
-
-Node* WasmGraphBuilder::BuildI32SConvertF32(Node* input,
- wasm::WasmCodePosition position,
- NumericImplementation impl) {
- I32SConvertOps int_ops(this);
- F32ConvertOps float_ops(this);
- return BuildI32ConvertOp(input, position, impl,
- jsgraph()->machine()->TruncateFloat32ToInt32(),
- wasm::kExprF32SConvertI32, &int_ops, &float_ops);
-}
-
-Node* WasmGraphBuilder::BuildI32SConvertF64(Node* input,
- wasm::WasmCodePosition position,
- NumericImplementation impl) {
- I32SConvertOps int_ops(this);
- F64ConvertOps float_ops(this);
- return BuildI32ConvertOp(input, position, impl,
- jsgraph()->machine()->ChangeFloat64ToInt32(),
- wasm::kExprF64SConvertI32, &int_ops, &float_ops);
-}
-
-Node* WasmGraphBuilder::BuildI32UConvertF32(Node* input,
- wasm::WasmCodePosition position,
- NumericImplementation impl) {
- I32UConvertOps int_ops(this);
- F32ConvertOps float_ops(this);
- return BuildI32ConvertOp(input, position, impl,
- jsgraph()->machine()->TruncateFloat32ToUint32(),
- wasm::kExprF32UConvertI32, &int_ops, &float_ops);
-}
-
-Node* WasmGraphBuilder::BuildI32UConvertF64(Node* input,
- wasm::WasmCodePosition position,
- NumericImplementation impl) {
- I32UConvertOps int_ops(this);
- F64ConvertOps float_ops(this);
- return BuildI32ConvertOp(input, position, impl,
- jsgraph()->machine()->TruncateFloat64ToUint32(),
- wasm::kExprF64UConvertI32, &int_ops, &float_ops);
+ return converted_value;
+ }
+ Node* test = ConvertSaturateTest(this, opcode, int_ty, float_ty, trunc,
+ converted_value);
+ Diamond tl_d(graph(), jsgraph()->common(), test, BranchHint::kFalse);
+ tl_d.Chain(Control());
+ Node* nan_test = Binop(NeOp(float_ty), input, input);
+ Diamond nan_d(graph(), jsgraph()->common(), nan_test, BranchHint::kFalse);
+ nan_d.Nest(tl_d, true);
+ Node* neg_test = Binop(LtOp(float_ty), input, Zero(this, float_ty));
+ Diamond sat_d(graph(), jsgraph()->common(), neg_test, BranchHint::kNone);
+ sat_d.Nest(nan_d, false);
+ Node* sat_val =
+ sat_d.Phi(int_ty.representation(), Min(this, int_ty), Max(this, int_ty));
+ Node* nan_val =
+ nan_d.Phi(int_ty.representation(), Zero(this, int_ty), sat_val);
+ return tl_d.Phi(int_ty.representation(), nan_val, converted_value);
}
Node* WasmGraphBuilder::BuildI32AsmjsSConvertF32(Node* input) {
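For the non-trapping *Sat* opcodes, BuildIntConvertFloat above lowers the conversion into a diamond per special case: a NaN input yields zero, an out-of-range negative clamps to the integer minimum, an out-of-range positive clamps to the maximum, and every in-range input keeps the truncated value. A scalar sketch of that saturation rule for the signed f64 -> i32 case (plain C++, not the graph construction):

#include <cmath>
#include <cstdint>
#include <iostream>
#include <limits>

// Saturating float64 -> int32 conversion: NaN -> 0, clamp out-of-range values,
// otherwise truncate toward zero.
int32_t I32SConvertSatF64(double input) {
  if (std::isnan(input)) return 0;
  if (input < static_cast<double>(std::numeric_limits<int32_t>::min()))
    return std::numeric_limits<int32_t>::min();
  if (input > static_cast<double>(std::numeric_limits<int32_t>::max()))
    return std::numeric_limits<int32_t>::max();
  return static_cast<int32_t>(input);
}

int main() {
  std::cout << I32SConvertSatF64(3.9) << "\n";    // 3
  std::cout << I32SConvertSatF64(-1e12) << "\n";  // -2147483648
  std::cout << I32SConvertSatF64(NAN) << "\n";    // 0
}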
@@ -1797,106 +1923,81 @@ Node* WasmGraphBuilder::BuildIntToFloatConversionInstruction(
return load;
}
-Node* WasmGraphBuilder::BuildI64SConvertF32(Node* input,
- wasm::WasmCodePosition position) {
- if (jsgraph()->machine()->Is32()) {
- return BuildFloatToIntConversionInstruction(
- input, ExternalReference::wasm_float32_to_int64(jsgraph()->isolate()),
- MachineRepresentation::kFloat32, MachineType::Int64(), position);
- } else {
- Node* trunc = graph()->NewNode(
- jsgraph()->machine()->TryTruncateFloat32ToInt64(), input);
- Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc,
- graph()->start());
- Node* overflow = graph()->NewNode(jsgraph()->common()->Projection(1), trunc,
- graph()->start());
- ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
- return result;
- }
-}
-
-Node* WasmGraphBuilder::BuildI64UConvertF32(Node* input,
- wasm::WasmCodePosition position) {
- if (jsgraph()->machine()->Is32()) {
- return BuildFloatToIntConversionInstruction(
- input, ExternalReference::wasm_float32_to_uint64(jsgraph()->isolate()),
- MachineRepresentation::kFloat32, MachineType::Int64(), position);
- } else {
- Node* trunc = graph()->NewNode(
- jsgraph()->machine()->TryTruncateFloat32ToUint64(), input);
- Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc,
- graph()->start());
- Node* overflow = graph()->NewNode(jsgraph()->common()->Projection(1), trunc,
- graph()->start());
- ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
- return result;
- }
-}
+namespace {
-Node* WasmGraphBuilder::BuildI64SConvertF64(Node* input,
- wasm::WasmCodePosition position) {
- if (jsgraph()->machine()->Is32()) {
- return BuildFloatToIntConversionInstruction(
- input, ExternalReference::wasm_float64_to_int64(jsgraph()->isolate()),
- MachineRepresentation::kFloat64, MachineType::Int64(), position);
- } else {
- Node* trunc = graph()->NewNode(
- jsgraph()->machine()->TryTruncateFloat64ToInt64(), input);
- Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc,
- graph()->start());
- Node* overflow = graph()->NewNode(jsgraph()->common()->Projection(1), trunc,
- graph()->start());
- ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
- return result;
+ExternalReference convert_ccall_ref(WasmGraphBuilder* builder,
+ wasm::WasmOpcode opcode) {
+ switch (opcode) {
+ case wasm::kExprI64SConvertF32:
+ case wasm::kExprI64SConvertSatF32:
+ return ExternalReference::wasm_float32_to_int64(
+ builder->jsgraph()->isolate());
+ case wasm::kExprI64UConvertF32:
+ case wasm::kExprI64UConvertSatF32:
+ return ExternalReference::wasm_float32_to_uint64(
+ builder->jsgraph()->isolate());
+ case wasm::kExprI64SConvertF64:
+ case wasm::kExprI64SConvertSatF64:
+ return ExternalReference::wasm_float64_to_int64(
+ builder->jsgraph()->isolate());
+ case wasm::kExprI64UConvertF64:
+ case wasm::kExprI64UConvertSatF64:
+ return ExternalReference::wasm_float64_to_uint64(
+ builder->jsgraph()->isolate());
+ default:
+ UNREACHABLE();
}
}
-Node* WasmGraphBuilder::BuildI64UConvertF64(Node* input,
- wasm::WasmCodePosition position) {
- if (jsgraph()->machine()->Is32()) {
- return BuildFloatToIntConversionInstruction(
- input, ExternalReference::wasm_float64_to_uint64(jsgraph()->isolate()),
- MachineRepresentation::kFloat64, MachineType::Int64(), position);
- } else {
- Node* trunc = graph()->NewNode(
- jsgraph()->machine()->TryTruncateFloat64ToUint64(), input);
- Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc,
- graph()->start());
- Node* overflow = graph()->NewNode(jsgraph()->common()->Projection(1), trunc,
- graph()->start());
- ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
- return result;
- }
-}
+} // namespace
-Node* WasmGraphBuilder::BuildFloatToIntConversionInstruction(
- Node* input, ExternalReference ref,
- MachineRepresentation parameter_representation,
- const MachineType result_type, wasm::WasmCodePosition position) {
+Node* WasmGraphBuilder::BuildCcallConvertFloat(Node* input,
+ wasm::WasmCodePosition position,
+ wasm::WasmOpcode opcode) {
+ const MachineType int_ty = IntConvertType(opcode);
+ const MachineType float_ty = FloatConvertType(opcode);
+ ExternalReference call_ref = convert_ccall_ref(this, opcode);
Node* stack_slot_param = graph()->NewNode(
- jsgraph()->machine()->StackSlot(parameter_representation));
+ jsgraph()->machine()->StackSlot(float_ty.representation()));
Node* stack_slot_result = graph()->NewNode(
- jsgraph()->machine()->StackSlot(result_type.representation()));
+ jsgraph()->machine()->StackSlot(int_ty.representation()));
const Operator* store_op = jsgraph()->machine()->Store(
- StoreRepresentation(parameter_representation, kNoWriteBarrier));
- *effect_ =
- graph()->NewNode(store_op, stack_slot_param, jsgraph()->Int32Constant(0),
- input, *effect_, *control_);
+ StoreRepresentation(float_ty.representation(), kNoWriteBarrier));
+ *effect_ = graph()->NewNode(store_op, stack_slot_param, Int32Constant(0),
+ input, *effect_, *control_);
MachineSignature::Builder sig_builder(jsgraph()->zone(), 1, 2);
sig_builder.AddReturn(MachineType::Int32());
sig_builder.AddParam(MachineType::Pointer());
sig_builder.AddParam(MachineType::Pointer());
- Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
- ZeroCheck32(wasm::kTrapFloatUnrepresentable,
- BuildCCall(sig_builder.Build(), function, stack_slot_param,
- stack_slot_result),
- position);
- const Operator* load_op = jsgraph()->machine()->Load(result_type);
- Node* load =
- graph()->NewNode(load_op, stack_slot_result, jsgraph()->Int32Constant(0),
- *effect_, *control_);
- *effect_ = load;
- return load;
+ Node* function =
+ graph()->NewNode(jsgraph()->common()->ExternalConstant(call_ref));
+ Node* overflow = BuildCCall(sig_builder.Build(), function, stack_slot_param,
+ stack_slot_result);
+ if (IsTrappingConvertOp(opcode)) {
+ ZeroCheck32(wasm::kTrapFloatUnrepresentable, overflow, position);
+ const Operator* load_op = jsgraph()->machine()->Load(int_ty);
+ Node* load = graph()->NewNode(load_op, stack_slot_result, Int32Constant(0),
+ *effect_, *control_);
+ *effect_ = load;
+ return load;
+ }
+ Node* test = Binop(wasm::kExprI32Eq, overflow, Int32Constant(0), position);
+ Diamond tl_d(graph(), jsgraph()->common(), test, BranchHint::kFalse);
+ tl_d.Chain(Control());
+ Node* nan_test = Binop(NeOp(float_ty), input, input);
+ Diamond nan_d(graph(), jsgraph()->common(), nan_test, BranchHint::kFalse);
+ nan_d.Nest(tl_d, true);
+ Node* neg_test = Binop(LtOp(float_ty), input, Zero(this, float_ty));
+ Diamond sat_d(graph(), jsgraph()->common(), neg_test, BranchHint::kNone);
+ sat_d.Nest(nan_d, false);
+ Node* sat_val =
+ sat_d.Phi(int_ty.representation(), Min(this, int_ty), Max(this, int_ty));
+ const Operator* load_op = jsgraph()->machine()->Load(int_ty);
+ Node* load = graph()->NewNode(load_op, stack_slot_result, Int32Constant(0),
+ *effect_, *control_);
+ Node* nan_val =
+ nan_d.Phi(int_ty.representation(), Zero(this, int_ty), sat_val);
+ return tl_d.Phi(int_ty.representation(), nan_val, load);
}
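
The saturating variants above route around the trap: when the C call reports the value as unrepresentable, a NaN input becomes zero and an out-of-range input clamps to the integer type's minimum or maximum. A minimal scalar sketch of those semantics for the signed f64 case (the helper names below are invented for the illustration and are not V8 APIs):

    #include <cmath>
    #include <cstdint>
    #include <limits>

    // Reports failure instead of trapping; stands in for the int32 status the
    // runtime helper returns through the C call above.
    bool TryTruncF64ToI64(double x, int64_t* out) {
      if (std::isnan(x)) return false;
      if (x >= 9223372036854775808.0 || x < -9223372036854775808.0) return false;
      *out = static_cast<int64_t>(std::trunc(x));
      return true;
    }

    int64_t SatI64SConvertF64(double input) {
      int64_t result;
      if (TryTruncF64ToI64(input, &result)) return result;  // representable
      if (std::isnan(input)) return 0;                       // NaN saturates to 0
      return input < 0.0 ? std::numeric_limits<int64_t>::min()
                         : std::numeric_limits<int64_t>::max();
    }
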
Node* WasmGraphBuilder::GrowMemory(Node* input) {
@@ -1956,13 +2057,13 @@ Node* WasmGraphBuilder::Throw(uint32_t tag,
switch (sig->GetParam(i)) {
case wasm::kWasmF32:
value = graph()->NewNode(m->BitcastFloat32ToInt32(), value);
- // Intentionally fall to next case.
+ V8_FALLTHROUGH;
case wasm::kWasmI32:
BuildEncodeException32BitValue(&index, value);
break;
case wasm::kWasmF64:
value = graph()->NewNode(m->BitcastFloat64ToInt64(), value);
- // Intentionally fall to next case.
+ V8_FALLTHROUGH;
case wasm::kWasmI64: {
Node* upper32 = graph()->NewNode(
m->TruncateInt64ToInt32(),
@@ -2417,10 +2518,10 @@ Node* WasmGraphBuilder::BuildCCall(MachineSignature* sig, Node* function,
DCHECK_EQ(sizeof...(args), sig->parameter_count());
Node* const call_args[] = {function, args..., *effect_, *control_};
- CallDescriptor* desc =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(jsgraph()->zone(), sig);
- const Operator* op = jsgraph()->common()->Call(desc);
+ const Operator* op = jsgraph()->common()->Call(call_descriptor);
Node* call = graph()->NewNode(op, arraysize(call_args), call_args);
*effect_ = call;
return call;
@@ -2428,8 +2529,12 @@ Node* WasmGraphBuilder::BuildCCall(MachineSignature* sig, Node* function,
Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
Node*** rets,
- wasm::WasmCodePosition position) {
- DCHECK_NOT_NULL(wasm_context_);
+ wasm::WasmCodePosition position,
+ Node* wasm_context) {
+ if (wasm_context == nullptr) {
+ DCHECK_NOT_NULL(wasm_context_);
+ wasm_context = wasm_context_.get();
+ }
SetNeedsStackCheck();
const size_t params = sig->parameter_count();
const size_t extra = 3; // wasm_context, effect, and control.
@@ -2440,14 +2545,14 @@ Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
// Make room for the wasm_context parameter at index 1, just after code.
memmove(&args[2], &args[1], params * sizeof(Node*));
- args[1] = wasm_context_.get();
+ args[1] = wasm_context;
// Add effect and control inputs.
args[params + 2] = *effect_;
args[params + 3] = *control_;
- CallDescriptor* descriptor = GetWasmCallDescriptor(jsgraph()->zone(), sig);
- const Operator* op = jsgraph()->common()->Call(descriptor);
+ auto call_descriptor = GetWasmCallDescriptor(jsgraph()->zone(), sig);
+ const Operator* op = jsgraph()->common()->Call(call_descriptor);
Node* call = graph()->NewNode(op, static_cast<int>(count), args);
SetSourcePosition(call, position);
@@ -2501,37 +2606,89 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
uint32_t table_index = 0;
wasm::FunctionSig* sig = env_->module->signatures[sig_index];
- EnsureFunctionTableNodes();
+ Node* table = nullptr;
+ Node* table_size = nullptr;
+ GetFunctionTableNodes(table_index, &table, &table_size);
MachineOperatorBuilder* machine = jsgraph()->machine();
Node* key = args[0];
// Bounds check against the table size.
- Node* size = function_tables_[table_index].size;
- Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, size);
+ Node* in_bounds =
+ graph()->NewNode(machine->Uint32LessThan(), key, table_size);
TrapIfFalse(wasm::kTrapFuncInvalid, in_bounds, position);
- Node* table_address = function_tables_[table_index].table_addr;
- Node* table = graph()->NewNode(
- jsgraph()->machine()->Load(MachineType::AnyTagged()), table_address,
- jsgraph()->IntPtrConstant(0), *effect_, *control_);
+
+  // Mask the key to prevent SSCA (speculative side-channel attacks).
+ if (untrusted_code_mitigations_) {
+ // mask = ((key - size) & ~key) >> 31
+ Node* neg_key =
+ graph()->NewNode(machine->Word32Xor(), key, Int32Constant(-1));
+ Node* masked_diff = graph()->NewNode(
+ machine->Word32And(),
+ graph()->NewNode(machine->Int32Sub(), key, table_size), neg_key);
+ Node* mask =
+ graph()->NewNode(machine->Word32Sar(), masked_diff, Int32Constant(31));
+ key = graph()->NewNode(machine->Word32And(), key, mask);
+ }
+
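
The mask folds the bounds check into the data path: an in-bounds key passes through unchanged, while a key that could only reach this point via misspeculation is forced to zero. A scalar sketch of the same arithmetic (assuming an arithmetic right shift, which is what the generated Word32Sar guarantees):

    #include <cstdint>

    uint32_t MaskTableIndex(uint32_t key, uint32_t size) {
      uint32_t neg_key = ~key;        // top bit clears the mask for huge keys
      uint32_t diff = key - size;     // top bit set when a small key is < size
      int32_t mask = static_cast<int32_t>(diff & neg_key) >> 31;  // all ones or zero
      return key & static_cast<uint32_t>(mask);
    }
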
// Load signature from the table and check.
// The table is a FixedArray; signatures are encoded as SMIs.
// [sig1, code1, sig2, code2, sig3, code3, ...]
static_assert(compiler::kFunctionTableEntrySize == 2, "consistency");
static_assert(compiler::kFunctionTableSignatureOffset == 0, "consistency");
static_assert(compiler::kFunctionTableCodeOffset == 1, "consistency");
+
+ int32_t canonical_sig_num = env_->module->signature_ids[sig_index];
+ if (WASM_CONTEXT_TABLES) {
+ // The table entries are {IndirectFunctionTableEntry} structs.
+ Node* scaled_key =
+ graph()->NewNode(machine->Int32Mul(), key,
+ Int32Constant(sizeof(IndirectFunctionTableEntry)));
+ const Operator* add = nullptr;
+ if (machine->Is64()) {
+ scaled_key = graph()->NewNode(machine->ChangeInt32ToInt64(), scaled_key);
+ add = machine->Int64Add();
+ } else {
+ add = machine->Int32Add();
+ }
+ Node* entry_address = graph()->NewNode(add, table, scaled_key);
+ Node* loaded_sig = graph()->NewNode(
+ machine->Load(MachineType::Int32()), entry_address,
+ Int32Constant(offsetof(IndirectFunctionTableEntry, sig_id)), *effect_,
+ *control_);
+ Node* sig_match = graph()->NewNode(machine->WordEqual(), loaded_sig,
+ Int32Constant(canonical_sig_num));
+
+ TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
+
+ Node* target = graph()->NewNode(
+ machine->Load(MachineType::Pointer()), entry_address,
+ Int32Constant(offsetof(IndirectFunctionTableEntry, target)), *effect_,
+ *control_);
+
+ Node* loaded_context = graph()->NewNode(
+ machine->Load(MachineType::Pointer()), entry_address,
+ Int32Constant(offsetof(IndirectFunctionTableEntry, context)), *effect_,
+ *control_);
+
+ args[0] = target;
+
+ return BuildWasmCall(sig, args, rets, position, loaded_context);
+ }
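
The loads above assume a per-slot struct holding the canonical signature id, the callee's context, and the raw call target; the field order and exact types below are illustrative, only the three fields and the two checks matter:

    struct IndirectFunctionTableEntrySketch {
      int32_t sig_id;   // canonical signature id, compared before the call
      void* context;    // WasmContext of the instance that owns the callee
      void* target;     // raw code entry point, becomes args[0]
    };
    // Runtime effect of the generated graph:
    //   if (key >= table_size)              -> trap kTrapFuncInvalid
    //   if (table[key].sig_id != expected)  -> trap kTrapFuncSigMismatch
    //   call table[key].target with table[key].context as the wasm_context.
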
+
+ // The table entries are elements of a fixed array.
ElementAccess access = AccessBuilder::ForFixedArrayElement();
const int fixed_offset = access.header_size - access.tag();
Node* key_offset = graph()->NewNode(machine->Word32Shl(), key,
Int32Constant(kPointerSizeLog2 + 1));
- Node* load_sig =
+ Node* loaded_sig =
graph()->NewNode(machine->Load(MachineType::AnyTagged()), table,
graph()->NewNode(machine->Int32Add(), key_offset,
Int32Constant(fixed_offset)),
*effect_, *control_);
- int32_t canonical_sig_num = env_->module->signature_ids[sig_index];
- CHECK_GE(sig_index, 0);
- Node* sig_match = graph()->NewNode(machine->WordEqual(), load_sig,
+ CHECK_GE(canonical_sig_num, 0);
+ Node* sig_match = graph()->NewNode(machine->WordEqual(), loaded_sig,
jsgraph()->SmiConstant(canonical_sig_num));
+
TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
// Load code object from the table. It is held by a Foreign.
@@ -2540,15 +2697,7 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
graph()->NewNode(machine->Int32Add(), key_offset,
Uint32Constant(fixed_offset + kPointerSize)),
*effect_, *control_);
- if (FLAG_wasm_jit_to_native) {
- Node* address = graph()->NewNode(
- machine->Load(MachineType::Pointer()), entry,
- Int32Constant(Foreign::kForeignAddressOffset - kHeapObjectTag),
- *effect_, *control_);
- args[0] = address;
- } else {
args[0] = entry;
- }
return BuildWasmCall(sig, args, rets, position);
}
@@ -2701,13 +2850,14 @@ Node* WasmGraphBuilder::ToJS(Node* node, wasm::ValueType type) {
Node* WasmGraphBuilder::BuildJavaScriptToNumber(Node* node, Node* js_context) {
Callable callable =
Builtins::CallableFor(jsgraph()->isolate(), Builtins::kToNumber);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
jsgraph()->isolate(), jsgraph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNoFlags, Operator::kNoProperties);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* result = graph()->NewNode(jsgraph()->common()->Call(desc), stub_code,
- node, js_context, *effect_, *control_);
+ Node* result =
+ graph()->NewNode(jsgraph()->common()->Call(call_descriptor), stub_code,
+ node, js_context, *effect_, *control_);
SetSourcePosition(result, 1);
@@ -2853,10 +3003,10 @@ Node* WasmGraphBuilder::BuildAllocateHeapNumberWithValue(Node* value,
graph()->NewNode(common->BeginRegion(RegionObservability::kNotObservable),
graph()->start());
if (!allocate_heap_number_operator_.is_set()) {
- CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
jsgraph()->isolate(), jsgraph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNoFlags, Operator::kNoThrow);
- allocate_heap_number_operator_.set(common->Call(descriptor));
+ allocate_heap_number_operator_.set(common->Call(call_descriptor));
}
Node* heap_number = graph()->NewNode(allocate_heap_number_operator_.get(),
target, js_context, effect, control);
@@ -2932,9 +3082,10 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(WasmCodeWrapper wasm_code,
// We only need a dummy call descriptor.
wasm::FunctionSig::Builder dummy_sig_builder(jsgraph()->zone(), 0, 0);
- CallDescriptor* desc =
+ auto call_descriptor =
GetWasmCallDescriptor(jsgraph()->zone(), dummy_sig_builder.Build());
- *effect_ = graph()->NewNode(jsgraph()->common()->Call(desc), pos, args);
+ *effect_ =
+ graph()->NewNode(jsgraph()->common()->Call(call_descriptor), pos, args);
Return(jsgraph()->UndefinedConstant());
return;
}
@@ -2957,9 +3108,10 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(WasmCodeWrapper wasm_code,
args[pos++] = *control_;
// Call the wasm code.
- CallDescriptor* desc = GetWasmCallDescriptor(jsgraph()->zone(), sig_);
+ auto call_descriptor = GetWasmCallDescriptor(jsgraph()->zone(), sig_);
- Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), count, args);
+ Node* call =
+ graph()->NewNode(jsgraph()->common()->Call(call_descriptor), count, args);
*effect_ = call;
// Clear the ThreadInWasmFlag
@@ -3017,7 +3169,7 @@ bool WasmGraphBuilder::BuildWasmToJSWrapper(
// Build the start and the parameter nodes.
Isolate* isolate = jsgraph()->isolate();
- CallDescriptor* desc;
+ CallDescriptor* call_descriptor;
Node* start = Start(wasm_count + 3);
*effect_ = start;
*control_ = start;
@@ -3067,7 +3219,7 @@ bool WasmGraphBuilder::BuildWasmToJSWrapper(
handle(isolate->heap()->undefined_value(), isolate));
}
- desc = Linkage::GetJSCallDescriptor(
+ call_descriptor = Linkage::GetJSCallDescriptor(
graph()->zone(), false, wasm_count + 1, CallDescriptor::kNoFlags);
// Convert wasm numbers to JS values.
@@ -3079,7 +3231,8 @@ bool WasmGraphBuilder::BuildWasmToJSWrapper(
args[pos++] = *effect_;
args[pos++] = *control_;
- call = graph()->NewNode(jsgraph()->common()->Call(desc), pos, args);
+ call = graph()->NewNode(jsgraph()->common()->Call(call_descriptor), pos,
+ args);
}
}
@@ -3094,9 +3247,9 @@ bool WasmGraphBuilder::BuildWasmToJSWrapper(
args[pos++] = jsgraph()->Constant(
handle(isolate->heap()->undefined_value(), isolate)); // receiver
- desc = Linkage::GetStubCallDescriptor(isolate, graph()->zone(),
- callable.descriptor(), wasm_count + 1,
- CallDescriptor::kNoFlags);
+ call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate, graph()->zone(), callable.descriptor(), wasm_count + 1,
+ CallDescriptor::kNoFlags);
// Convert wasm numbers to JS values.
pos = AddParameterNodes(args, pos, wasm_count, sig_);
@@ -3111,7 +3264,8 @@ bool WasmGraphBuilder::BuildWasmToJSWrapper(
args[pos++] = *effect_;
args[pos++] = *control_;
- call = graph()->NewNode(jsgraph()->common()->Call(desc), pos, args);
+ call =
+ graph()->NewNode(jsgraph()->common()->Call(call_descriptor), pos, args);
}
*effect_ = call;
@@ -3173,9 +3327,9 @@ void WasmGraphBuilder::BuildWasmToWasmWrapper(WasmCodeWrapper wasm_code,
args[pos++] = *control_;
// Tail-call the wasm code.
- CallDescriptor* desc = GetWasmCallDescriptor(jsgraph()->zone(), sig_);
- Node* tail_call =
- graph()->NewNode(jsgraph()->common()->TailCall(desc), count, args);
+ auto call_descriptor = GetWasmCallDescriptor(jsgraph()->zone(), sig_);
+ Node* tail_call = graph()->NewNode(
+ jsgraph()->common()->TailCall(call_descriptor), count, args);
MergeControlToEnd(jsgraph(), tail_call);
}
@@ -3246,17 +3400,12 @@ void WasmGraphBuilder::BuildWasmInterpreterEntry(uint32_t func_index) {
if (HasInt64ParamOrReturn(sig_)) LowerInt64();
}
-void WasmGraphBuilder::BuildCWasmEntry(Address wasm_context_address) {
+void WasmGraphBuilder::BuildCWasmEntry() {
// Build the start and the JS parameter nodes.
Node* start = Start(CWasmEntryParameters::kNumParameters + 5);
*control_ = start;
*effect_ = start;
- // Create the wasm_context node to pass as parameter.
- DCHECK_NULL(wasm_context_);
- wasm_context_ = jsgraph()->IntPtrConstant(
- reinterpret_cast<uintptr_t>(wasm_context_address));
-
// Create parameter nodes (offset by 1 for the receiver parameter).
Node* code_obj = nullptr;
if (FLAG_wasm_jit_to_native) {
@@ -3269,6 +3418,7 @@ void WasmGraphBuilder::BuildCWasmEntry(Address wasm_context_address) {
} else {
code_obj = Param(CWasmEntryParameters::kCodeObject + 1);
}
+ Node* wasm_context = Param(CWasmEntryParameters::kWasmContext + 1);
Node* arg_buffer = Param(CWasmEntryParameters::kArgumentsBuffer + 1);
int wasm_arg_count = static_cast<int>(sig_->parameter_count());
@@ -3277,7 +3427,7 @@ void WasmGraphBuilder::BuildCWasmEntry(Address wasm_context_address) {
int pos = 0;
args[pos++] = code_obj;
- args[pos++] = wasm_context_.get();
+ args[pos++] = wasm_context;
int offset = 0;
for (wasm::ValueType type : sig_->parameters()) {
@@ -3294,10 +3444,10 @@ void WasmGraphBuilder::BuildCWasmEntry(Address wasm_context_address) {
DCHECK_EQ(arg_count, pos);
// Call the wasm code.
- CallDescriptor* desc = GetWasmCallDescriptor(jsgraph()->zone(), sig_);
+ auto call_descriptor = GetWasmCallDescriptor(jsgraph()->zone(), sig_);
- Node* call =
- graph()->NewNode(jsgraph()->common()->Call(desc), arg_count, args);
+ Node* call = graph()->NewNode(jsgraph()->common()->Call(call_descriptor),
+ arg_count, args);
*effect_ = call;
// Store the return value.
@@ -3495,20 +3645,44 @@ Node* WasmGraphBuilder::CurrentMemoryPages() {
jsgraph()->Int32Constant(WhichPowerOf2(wasm::kWasmPageSize)));
}
-void WasmGraphBuilder::EnsureFunctionTableNodes() {
- if (function_tables_.size() > 0) return;
- size_t tables_size = env_->function_tables.size();
- for (size_t i = 0; i < tables_size; ++i) {
- wasm::GlobalHandleAddress function_handle_address =
- env_->function_tables[i];
- Node* table_addr = jsgraph()->RelocatableIntPtrConstant(
- reinterpret_cast<intptr_t>(function_handle_address),
- RelocInfo::WASM_GLOBAL_HANDLE);
- uint32_t table_size = env_->module->function_tables[i].initial_size;
- Node* size = jsgraph()->RelocatableInt32Constant(
- static_cast<uint32_t>(table_size),
- RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE);
- function_tables_.push_back({table_addr, size});
+void WasmGraphBuilder::GetFunctionTableNodes(uint32_t table_index, Node** table,
+ Node** table_size) {
+ if (WASM_CONTEXT_TABLES) {
+ // The table address and size are stored in the WasmContext.
+ // Don't bother caching them, since they are only used in indirect calls,
+ // which would cause them to be spilled on the stack anyway.
+ *table = graph()->NewNode(
+ jsgraph()->machine()->Load(MachineType::UintPtr()), wasm_context_.get(),
+ jsgraph()->Int32Constant(
+ static_cast<int32_t>(offsetof(WasmContext, table))),
+ *effect_, *control_);
+ *table_size = graph()->NewNode(
+ jsgraph()->machine()->Load(MachineType::Uint32()), wasm_context_.get(),
+ jsgraph()->Int32Constant(
+ static_cast<int32_t>(offsetof(WasmContext, table_size))),
+ *effect_, *control_);
+ } else {
+ // The function table nodes are relocatable constants.
+ if (function_tables_.size() == 0) {
+ size_t tables_size = env_->function_tables.size();
+ for (size_t i = 0; i < tables_size; ++i) {
+ wasm::GlobalHandleAddress function_handle_address =
+ env_->function_tables[i];
+ Node* table_addr = jsgraph()->RelocatableIntPtrConstant(
+ reinterpret_cast<intptr_t>(function_handle_address),
+ RelocInfo::WASM_GLOBAL_HANDLE);
+ uint32_t table_size = env_->module->function_tables[i].initial_size;
+ Node* size = jsgraph()->RelocatableInt32Constant(
+ static_cast<uint32_t>(table_size),
+ RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE);
+ function_tables_.push_back({table_addr, size});
+ }
+ }
+ *table_size = function_tables_[table_index].size;
+ *table =
+ graph()->NewNode(jsgraph()->machine()->Load(MachineType::AnyTagged()),
+ function_tables_[table_index].table_addr,
+ jsgraph()->IntPtrConstant(0), *effect_, *control_);
}
}
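
In the WASM_CONTEXT_TABLES path the table pointer and its current size are plain fields of the context object; a minimal stand-in for just the two fields read above (the real WasmContext carries more state, and the field types here are inferred from the UintPtr/Uint32 load widths):

    struct WasmContextFieldsSketch {
      uintptr_t table;      // base address of the indirect function table
      uint32_t table_size;  // number of entries currently in the table
    };
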
@@ -3539,7 +3713,7 @@ Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f,
Node** parameters,
int parameter_count) {
const Runtime::Function* fun = Runtime::FunctionForId(f);
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
jsgraph()->zone(), f, fun->nargs, Operator::kNoProperties,
CallDescriptor::kNoFlags);
// CEntryStubConstant nodes have to be created and cached in the main
@@ -3562,8 +3736,8 @@ Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f,
inputs[count++] = *effect_;
inputs[count++] = *control_;
- Node* node = jsgraph()->graph()->NewNode(jsgraph()->common()->Call(desc),
- count, inputs);
+ Node* node = jsgraph()->graph()->NewNode(
+ jsgraph()->common()->Call(call_descriptor), count, inputs);
*effect_ = node;
return node;
@@ -4370,22 +4544,22 @@ Node* WasmGraphBuilder::Simd8x16ShuffleOp(const uint8_t shuffle[16],
#define ATOMIC_BINOP_LIST(V) \
V(I32AtomicAdd, Add, Uint32) \
- V(I32AtomicSub, Sub, Uint32) \
- V(I32AtomicAnd, And, Uint32) \
- V(I32AtomicOr, Or, Uint32) \
- V(I32AtomicXor, Xor, Uint32) \
- V(I32AtomicExchange, Exchange, Uint32) \
V(I32AtomicAdd8U, Add, Uint8) \
- V(I32AtomicSub8U, Sub, Uint8) \
- V(I32AtomicAnd8U, And, Uint8) \
- V(I32AtomicOr8U, Or, Uint8) \
- V(I32AtomicXor8U, Xor, Uint8) \
- V(I32AtomicExchange8U, Exchange, Uint8) \
V(I32AtomicAdd16U, Add, Uint16) \
+ V(I32AtomicSub, Sub, Uint32) \
+ V(I32AtomicSub8U, Sub, Uint8) \
V(I32AtomicSub16U, Sub, Uint16) \
+ V(I32AtomicAnd, And, Uint32) \
+ V(I32AtomicAnd8U, And, Uint8) \
V(I32AtomicAnd16U, And, Uint16) \
+ V(I32AtomicOr, Or, Uint32) \
+ V(I32AtomicOr8U, Or, Uint8) \
V(I32AtomicOr16U, Or, Uint16) \
+ V(I32AtomicXor, Xor, Uint32) \
+ V(I32AtomicXor8U, Xor, Uint8) \
V(I32AtomicXor16U, Xor, Uint16) \
+ V(I32AtomicExchange, Exchange, Uint32) \
+ V(I32AtomicExchange8U, Exchange, Uint8) \
V(I32AtomicExchange16U, Exchange, Uint16)
#define ATOMIC_TERNARY_LIST(V) \
@@ -4409,15 +4583,15 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
// TODO(gdeepti): Add alignment validation, traps on misalignment
Node* node;
switch (opcode) {
-#define BUILD_ATOMIC_BINOP(Name, Operation, Type) \
- case wasm::kExpr##Name: { \
- Node* index = \
- BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
- inputs[0], offset, position, kNeedsBoundsCheck); \
- node = graph()->NewNode( \
- jsgraph()->machine()->Atomic##Operation(MachineType::Type()), \
- MemBuffer(offset), index, inputs[1], *effect_, *control_); \
- break; \
+#define BUILD_ATOMIC_BINOP(Name, Operation, Type) \
+ case wasm::kExpr##Name: { \
+ Node* index = \
+ BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
+ inputs[0], offset, position, kNeedsBoundsCheck); \
+ node = graph()->NewNode( \
+ jsgraph()->machine()->Word32Atomic##Operation(MachineType::Type()), \
+ MemBuffer(offset), index, inputs[1], *effect_, *control_); \
+ break; \
}
ATOMIC_BINOP_LIST(BUILD_ATOMIC_BINOP)
#undef BUILD_ATOMIC_BINOP
@@ -4428,7 +4602,7 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
inputs[0], offset, position, kNeedsBoundsCheck); \
node = graph()->NewNode( \
- jsgraph()->machine()->Atomic##Operation(MachineType::Type()), \
+ jsgraph()->machine()->Word32Atomic##Operation(MachineType::Type()), \
MemBuffer(offset), index, inputs[1], inputs[2], *effect_, *control_); \
break; \
}
@@ -4441,22 +4615,22 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
inputs[0], offset, position, kNeedsBoundsCheck); \
node = graph()->NewNode( \
- jsgraph()->machine()->AtomicLoad(MachineType::Type()), \
+ jsgraph()->machine()->Word32AtomicLoad(MachineType::Type()), \
MemBuffer(offset), index, *effect_, *control_); \
break; \
}
ATOMIC_LOAD_LIST(BUILD_ATOMIC_LOAD_OP)
#undef BUILD_ATOMIC_LOAD_OP
-#define BUILD_ATOMIC_STORE_OP(Name, Type, Rep) \
- case wasm::kExpr##Name: { \
- Node* index = \
- BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
- inputs[0], offset, position, kNeedsBoundsCheck); \
- node = graph()->NewNode( \
- jsgraph()->machine()->AtomicStore(MachineRepresentation::Rep), \
- MemBuffer(offset), index, inputs[1], *effect_, *control_); \
- break; \
+#define BUILD_ATOMIC_STORE_OP(Name, Type, Rep) \
+ case wasm::kExpr##Name: { \
+ Node* index = \
+ BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
+ inputs[0], offset, position, kNeedsBoundsCheck); \
+ node = graph()->NewNode( \
+ jsgraph()->machine()->Word32AtomicStore(MachineRepresentation::Rep), \
+ MemBuffer(offset), index, inputs[1], *effect_, *control_); \
+ break; \
}
ATOMIC_STORE_LIST(BUILD_ATOMIC_STORE_OP)
#undef BUILD_ATOMIC_STORE_OP
@@ -4568,7 +4742,7 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module,
#endif
if (must_record_function_compilation(isolate)) {
- RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate, code,
+ RecordFunctionCompilation(CodeEventListener::STUB_TAG, isolate, code,
"%.*s", func_name.length(), func_name.start());
}
@@ -4701,7 +4875,7 @@ Handle<Code> CompileWasmToJSWrapper(
#endif
if (must_record_function_compilation(isolate)) {
- RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate, code,
+ RecordFunctionCompilation(CodeEventListener::STUB_TAG, isolate, code,
"%.*s", func_name.length(), func_name.start());
}
@@ -4774,7 +4948,7 @@ Handle<Code> CompileWasmToWasmWrapper(Isolate* isolate, WasmCodeWrapper target,
buffer.Dispose();
}
if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
- RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate, code,
+ RecordFunctionCompilation(CodeEventListener::STUB_TAG, isolate, code,
"wasm-to-wasm");
}
@@ -4837,7 +5011,7 @@ Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
#endif
if (must_record_function_compilation(isolate)) {
- RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate, code,
+ RecordFunctionCompilation(CodeEventListener::STUB_TAG, isolate, code,
"%.*s", func_name.length(), func_name.start());
}
}
@@ -4852,8 +5026,7 @@ Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
return code;
}
-Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig,
- Address wasm_context_address) {
+Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
Zone zone(isolate->allocator(), ZONE_NAME);
Graph graph(&zone);
CommonOperatorBuilder common(&zone);
@@ -4870,7 +5043,7 @@ Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig,
CEntryStub(isolate, 1).GetCode(), sig);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
- builder.BuildCWasmEntry(wasm_context_address);
+ builder.BuildCWasmEntry();
if (FLAG_trace_turbo_graph) { // Simple textual RPO.
OFStream os(stdout);
@@ -5051,7 +5224,7 @@ void WasmCompilationUnit::ExecuteCompilation() {
liftoff_.~LiftoffData();
mode_ = WasmCompilationUnit::CompilationMode::kTurbofan;
new (&tf_) TurbofanData();
- // fall-through
+ V8_FALLTHROUGH;
case WasmCompilationUnit::CompilationMode::kTurbofan:
ExecuteTurbofanCompilation();
break;
@@ -5089,19 +5262,20 @@ void WasmCompilationUnit::ExecuteTurbofanCompilation() {
tf_.compilation_zone_.reset(new Zone(isolate_->allocator(), ZONE_NAME));
// Run the compiler pipeline to generate machine code.
- CallDescriptor* descriptor =
+ auto call_descriptor =
GetWasmCallDescriptor(tf_.compilation_zone_.get(), func_body_.sig);
if (tf_.jsgraph_->machine()->Is32()) {
- descriptor =
- GetI32WasmCallDescriptor(tf_.compilation_zone_.get(), descriptor);
+ call_descriptor = GetI32WasmCallDescriptor(tf_.compilation_zone_.get(),
+ call_descriptor);
}
tf_.info_.reset(new CompilationInfo(
GetDebugName(tf_.compilation_zone_.get(), func_name_, func_index_),
tf_.compilation_zone_.get(), Code::WASM_FUNCTION));
tf_.job_.reset(Pipeline::NewWasmCompilationJob(
- tf_.info_.get(), isolate_, tf_.jsgraph_, descriptor, source_positions,
- protected_instructions_.get(), env_->module->origin()));
+ tf_.info_.get(), isolate_, tf_.jsgraph_, call_descriptor,
+ source_positions, protected_instructions_.get(),
+ env_->module->origin()));
ok_ = tf_.job_->ExecuteJob() == CompilationJob::SUCCEEDED;
// TODO(bradnelson): Improve histogram handling of size_t.
counters()->wasm_compile_function_peak_memory_bytes()->AddSample(
@@ -5195,11 +5369,11 @@ WasmCodeWrapper WasmCompilationUnit::FinishTurbofanCompilation(
desc, tf_.job_->compilation_info()->wasm_code_desc()->frame_slot_count,
func_index_,
tf_.job_->compilation_info()->wasm_code_desc()->safepoint_table_offset,
- std::move(protected_instructions_));
+ tf_.job_->compilation_info()->wasm_code_desc()->handler_table_offset,
+ std::move(protected_instructions_), false);
if (!code) {
return WasmCodeWrapper(code);
}
- // TODO(mtrofin): add CodeEventListener call - see the non-native case.
if (FLAG_trace_wasm_decode_time) {
double codegen_ms = codegen_timer.Elapsed().InMillisecondsF();
PrintF("wasm-code-generation ok: %u bytes, %0.3f ms code generation\n",
@@ -5207,17 +5381,14 @@ WasmCodeWrapper WasmCompilationUnit::FinishTurbofanCompilation(
codegen_ms);
}
+ PROFILE(isolate_,
+ CodeCreateEvent(CodeEventListener::FUNCTION_TAG, code, func_name_));
+
Handle<ByteArray> source_positions =
tf_.job_->compilation_info()->wasm_code_desc()->source_positions_table;
- MaybeHandle<HandlerTable> handler_table =
- tf_.job_->compilation_info()->wasm_code_desc()->handler_table;
native_module_->compiled_module()->source_positions()->set(
func_index_, *source_positions);
- if (!handler_table.is_null()) {
- native_module_->compiled_module()->handler_table()->set(
- func_index_, *handler_table.ToHandleChecked());
- }
#ifdef ENABLE_DISASSEMBLER
// Note: only do this after setting source positions, as this will be
@@ -5272,8 +5443,7 @@ WasmCodeWrapper WasmCompilationUnit::FinishLiftoffCompilation(
Handle<Code> code;
code = isolate_->factory()->NewCode(
desc, Code::WASM_FUNCTION, code, Builtins::kNoBuiltinId,
- MaybeHandle<HandlerTable>(), source_positions,
- MaybeHandle<DeoptimizationData>(), kMovable,
+ source_positions, MaybeHandle<DeoptimizationData>(), kMovable,
0, // stub_key
false, // is_turbofanned
liftoff_.asm_.GetTotalFrameSlotCount(), // stack_slots
@@ -5287,14 +5457,16 @@ WasmCodeWrapper WasmCompilationUnit::FinishLiftoffCompilation(
PackProtectedInstructions(code);
ret = WasmCodeWrapper(code);
} else {
- // TODO(mtrofin): figure a way to raise events.
- // Consider lifting it to FinishCompilation.
+      // TODO(herhut): Consider lifting this to FinishCompilation.
native_module_->compiled_module()->source_positions()->set(
func_index_, *source_positions);
- ret = WasmCodeWrapper(
+ wasm::WasmCode* code =
native_module_->AddCode(desc, liftoff_.asm_.GetTotalFrameSlotCount(),
func_index_, liftoff_.safepoint_table_offset_,
- std::move(protected_instructions_), true));
+ 0, std::move(protected_instructions_), true);
+ PROFILE(isolate_,
+ CodeCreateEvent(CodeEventListener::FUNCTION_TAG, code, func_name_));
+ ret = WasmCodeWrapper(code);
}
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code || FLAG_print_wasm_code) {
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 22a2e1071e..e23fd4fe14 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -227,6 +227,7 @@ Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
enum CWasmEntryParameters {
kCodeObject,
+ kWasmContext,
kArgumentsBuffer,
// marker:
kNumParameters
@@ -235,8 +236,7 @@ enum CWasmEntryParameters {
// Compiles a stub with JS linkage, taking parameters as described by
// {CWasmEntryParameters}. It loads the wasm parameters from the argument
// buffer and calls the wasm function given as first parameter.
-Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig,
- Address wasm_context_address);
+Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig);
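
With the context no longer baked into the stub as a constant, one C-WASM entry per signature can serve any instance; the context travels as an ordinary parameter. The layout seen by the generated entry, offset by one for the JS receiver as in the enum above:

    // Param 1: code object (or code entry) of the wasm function to call
    // Param 2: WasmContext* of the target instance   (new in this change)
    // Param 3: packed argument/return buffer
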
// Values from the {WasmContext} are cached between WASM-level function calls.
// This struct allows the SSA environment handling this cache to be defined
@@ -255,8 +255,6 @@ typedef ZoneVector<Node*> NodeVector;
class WasmGraphBuilder {
public:
enum EnforceBoundsCheck : bool { kNeedsBoundsCheck, kCanOmitBoundsCheck };
- struct IntConvertOps;
- struct FloatConvertOps;
WasmGraphBuilder(ModuleEnv* env, Zone* zone, JSGraph* graph,
Handle<Code> centry_stub, wasm::FunctionSig* sig,
@@ -369,12 +367,13 @@ class WasmGraphBuilder {
void BuildWasmToWasmWrapper(WasmCodeWrapper wasm_code_start,
Address new_wasm_context_address);
void BuildWasmInterpreterEntry(uint32_t func_index);
- void BuildCWasmEntry(Address wasm_context_address);
+ void BuildCWasmEntry();
Node* ToJS(Node* node, wasm::ValueType type);
Node* FromJS(Node* node, Node* js_context, wasm::ValueType type);
Node* Invert(Node* node);
- void EnsureFunctionTableNodes();
+ void GetFunctionTableNodes(uint32_t table_index, Node** table,
+ Node** table_size);
//-----------------------------------------------------------------------
// Operations that concern the linear memory.
@@ -451,8 +450,10 @@ class WasmGraphBuilder {
bool use_trap_handler() const { return env_ && env_->use_trap_handler; }
+ JSGraph* jsgraph() { return jsgraph_; }
+ Graph* graph();
+
private:
- enum class NumericImplementation : uint8_t { kTrap, kSaturate };
static const int kDefaultBufferSize = 16;
Zone* const zone_;
@@ -487,10 +488,6 @@ class WasmGraphBuilder {
compiler::SourcePositionTable* const source_position_table_ = nullptr;
- // Internal helper methods.
- JSGraph* jsgraph() { return jsgraph_; }
- Graph* graph();
-
Node* String(const char* string);
Node* MemBuffer(uint32_t offset);
// BoundsCheckMem receives a uint32 {index} node and returns a ptrsize index.
@@ -510,29 +507,14 @@ class WasmGraphBuilder {
template <typename... Args>
Node* BuildCCall(MachineSignature* sig, Node* function, Args... args);
Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args, Node*** rets,
- wasm::WasmCodePosition position);
+ wasm::WasmCodePosition position,
+ Node* wasm_context = nullptr);
Node* BuildF32CopySign(Node* left, Node* right);
Node* BuildF64CopySign(Node* left, Node* right);
- Node* BuildI32ConvertOp(Node* input, wasm::WasmCodePosition position,
- NumericImplementation impl, const Operator* op,
- wasm::WasmOpcode check_op,
- const IntConvertOps* int_ops,
- const FloatConvertOps* float_ops);
- Node* BuildConvertCheck(Node* test, Node* result, Node* input,
- wasm::WasmCodePosition position,
- NumericImplementation impl,
- const IntConvertOps* int_ops,
- const FloatConvertOps* float_ops);
- Node* BuildI32SConvertF32(Node* input, wasm::WasmCodePosition position,
- NumericImplementation impl);
- Node* BuildI32SConvertF64(Node* input, wasm::WasmCodePosition position,
- NumericImplementation impl);
- Node* BuildI32UConvertF32(Node* input, wasm::WasmCodePosition position,
- NumericImplementation impl);
- Node* BuildI32UConvertF64(Node* input, wasm::WasmCodePosition position,
- NumericImplementation impl);
+ Node* BuildIntConvertFloat(Node* input, wasm::WasmCodePosition position,
+ wasm::WasmOpcode);
Node* BuildI32Ctz(Node* input);
Node* BuildI32Popcnt(Node* input);
Node* BuildI64Ctz(Node* input);
@@ -567,14 +549,8 @@ class WasmGraphBuilder {
Node* BuildF64SConvertI64(Node* input);
Node* BuildF64UConvertI64(Node* input);
- Node* BuildFloatToIntConversionInstruction(
- Node* input, ExternalReference ref,
- MachineRepresentation parameter_representation,
- const MachineType result_type, wasm::WasmCodePosition position);
- Node* BuildI64SConvertF32(Node* input, wasm::WasmCodePosition position);
- Node* BuildI64UConvertF32(Node* input, wasm::WasmCodePosition position);
- Node* BuildI64SConvertF64(Node* input, wasm::WasmCodePosition position);
- Node* BuildI64UConvertF64(Node* input, wasm::WasmCodePosition position);
+ Node* BuildCcallConvertFloat(Node* input, wasm::WasmCodePosition position,
+ wasm::WasmOpcode opcode);
Node* BuildI32DivS(Node* left, Node* right, wasm::WasmCodePosition position);
Node* BuildI32RemS(Node* left, Node* right, wasm::WasmCodePosition position);
@@ -657,9 +633,9 @@ constexpr int kWasmContextParameterIndex = 0;
V8_EXPORT_PRIVATE CallDescriptor* GetWasmCallDescriptor(
Zone* zone, wasm::FunctionSig* signature);
V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptor(
- Zone* zone, CallDescriptor* descriptor);
+ Zone* zone, CallDescriptor* call_descriptor);
V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptorForSimd(
- Zone* zone, CallDescriptor* descriptor);
+ Zone* zone, CallDescriptor* call_descriptor);
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/wasm-linkage.cc b/deps/v8/src/compiler/wasm-linkage.cc
index e7bb3c164a..cef127f334 100644
--- a/deps/v8/src/compiler/wasm-linkage.cc
+++ b/deps/v8/src/compiler/wasm-linkage.cc
@@ -280,77 +280,77 @@ CallDescriptor* GetWasmCallDescriptor(Zone* zone, wasm::FunctionSig* fsig) {
}
CallDescriptor* ReplaceTypeInCallDescriptorWith(
- Zone* zone, CallDescriptor* descriptor, size_t num_replacements,
+ Zone* zone, CallDescriptor* call_descriptor, size_t num_replacements,
MachineType input_type, MachineRepresentation output_type) {
- size_t parameter_count = descriptor->ParameterCount();
- size_t return_count = descriptor->ReturnCount();
- for (size_t i = 0; i < descriptor->ParameterCount(); i++) {
- if (descriptor->GetParameterType(i) == input_type) {
+ size_t parameter_count = call_descriptor->ParameterCount();
+ size_t return_count = call_descriptor->ReturnCount();
+ for (size_t i = 0; i < call_descriptor->ParameterCount(); i++) {
+ if (call_descriptor->GetParameterType(i) == input_type) {
parameter_count += num_replacements - 1;
}
}
- for (size_t i = 0; i < descriptor->ReturnCount(); i++) {
- if (descriptor->GetReturnType(i) == input_type) {
+ for (size_t i = 0; i < call_descriptor->ReturnCount(); i++) {
+ if (call_descriptor->GetReturnType(i) == input_type) {
return_count += num_replacements - 1;
}
}
- if (parameter_count == descriptor->ParameterCount() &&
- return_count == descriptor->ReturnCount()) {
- return descriptor;
+ if (parameter_count == call_descriptor->ParameterCount() &&
+ return_count == call_descriptor->ReturnCount()) {
+ return call_descriptor;
}
LocationSignature::Builder locations(zone, return_count, parameter_count);
Allocator params = parameter_registers;
- for (size_t i = 0; i < descriptor->ParameterCount(); i++) {
- if (descriptor->GetParameterType(i) == input_type) {
+ for (size_t i = 0; i < call_descriptor->ParameterCount(); i++) {
+ if (call_descriptor->GetParameterType(i) == input_type) {
for (size_t j = 0; j < num_replacements; j++) {
locations.AddParam(params.Next(output_type));
}
} else {
locations.AddParam(
- params.Next(descriptor->GetParameterType(i).representation()));
+ params.Next(call_descriptor->GetParameterType(i).representation()));
}
}
Allocator rets = return_registers;
rets.AdjustStackOffset(params.stack_offset);
- for (size_t i = 0; i < descriptor->ReturnCount(); i++) {
- if (descriptor->GetReturnType(i) == input_type) {
+ for (size_t i = 0; i < call_descriptor->ReturnCount(); i++) {
+ if (call_descriptor->GetReturnType(i) == input_type) {
for (size_t j = 0; j < num_replacements; j++) {
locations.AddReturn(rets.Next(output_type));
}
} else {
locations.AddReturn(
- rets.Next(descriptor->GetReturnType(i).representation()));
+ rets.Next(call_descriptor->GetReturnType(i).representation()));
}
}
- return new (zone) CallDescriptor( // --
- descriptor->kind(), // kind
- descriptor->GetInputType(0), // target MachineType
- descriptor->GetInputLocation(0), // target location
- locations.Build(), // location_sig
- params.stack_offset, // stack_parameter_count
- descriptor->properties(), // properties
- descriptor->CalleeSavedRegisters(), // callee-saved registers
- descriptor->CalleeSavedFPRegisters(), // callee-saved fp regs
- descriptor->flags(), // flags
- descriptor->debug_name(), // debug name
- descriptor->AllocatableRegisters(), // allocatable registers
- rets.stack_offset - params.stack_offset); // stack_return_count
+ return new (zone) CallDescriptor( // --
+ call_descriptor->kind(), // kind
+ call_descriptor->GetInputType(0), // target MachineType
+ call_descriptor->GetInputLocation(0), // target location
+ locations.Build(), // location_sig
+ params.stack_offset, // stack_parameter_count
+ call_descriptor->properties(), // properties
+ call_descriptor->CalleeSavedRegisters(), // callee-saved registers
+ call_descriptor->CalleeSavedFPRegisters(), // callee-saved fp regs
+ call_descriptor->flags(), // flags
+ call_descriptor->debug_name(), // debug name
+ call_descriptor->AllocatableRegisters(), // allocatable registers
+ rets.stack_offset - params.stack_offset); // stack_return_count
}
CallDescriptor* GetI32WasmCallDescriptor(Zone* zone,
- CallDescriptor* descriptor) {
- return ReplaceTypeInCallDescriptorWith(zone, descriptor, 2,
+ CallDescriptor* call_descriptor) {
+ return ReplaceTypeInCallDescriptorWith(zone, call_descriptor, 2,
MachineType::Int64(),
MachineRepresentation::kWord32);
}
-CallDescriptor* GetI32WasmCallDescriptorForSimd(Zone* zone,
- CallDescriptor* descriptor) {
- return ReplaceTypeInCallDescriptorWith(zone, descriptor, 4,
+CallDescriptor* GetI32WasmCallDescriptorForSimd(
+ Zone* zone, CallDescriptor* call_descriptor) {
+ return ReplaceTypeInCallDescriptorWith(zone, call_descriptor, 4,
MachineType::Simd128(),
MachineRepresentation::kWord32);
}
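
On 32-bit targets every i64 slot in a descriptor expands to two word32 locations (and every s128 to four); the count adjustment mirrors the first loop of ReplaceTypeInCallDescriptorWith. A small sketch of that bookkeeping:

    #include <cstddef>
    #include <vector>

    size_t LoweredCount(const std::vector<bool>& slot_matches_input_type,
                        size_t num_replacements) {
      size_t count = slot_matches_input_type.size();
      for (bool matches : slot_matches_input_type) {
        if (matches) count += num_replacements - 1;  // 2 for i64, 4 for s128
      }
      return count;
    }
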
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index bc92f9707c..cc6d758a9a 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -296,13 +296,25 @@ class WasmOutOfLineTrap final : public OutOfLineCode {
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
X64OperandConverter& i, int pc) {
- const X64MemoryProtection protection =
- static_cast<X64MemoryProtection>(MiscField::decode(opcode));
- if (protection == X64MemoryProtection::kProtected) {
+ const MemoryAccessMode access_mode =
+ static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ if (access_mode == kMemoryAccessProtected) {
const bool frame_elided = !codegen->frame_access_state()->has_frame();
new (zone) WasmOutOfLineTrap(codegen, pc, frame_elided, instr);
}
}
+
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
+ InstructionCode opcode, Instruction* instr,
+ X64OperandConverter& i) {
+ const MemoryAccessMode access_mode =
+ static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ if (access_mode == kMemoryAccessPoisoned) {
+ Register value = i.OutputRegister();
+ codegen->tasm()->andq(value, kSpeculationPoisonRegister);
+ }
+}
+
} // namespace
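
Loads marked kMemoryAccessPoisoned get their result ANDed with the speculation poison register, so a value obtained under misspeculation reads as zero and cannot steer a later dependent access. The data-side effect, in scalar form:

    #include <cstdint>

    uint64_t PoisonLoadedValue(uint64_t loaded, uint64_t poison_mask) {
      // poison_mask is all ones on the architectural path and all zeros when
      // the CPU reached this code by misspeculating.
      return loaded & poison_mask;
    }
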
@@ -570,32 +582,44 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
first_unused_stack_slot);
}
+// Check that {kJavaScriptCallCodeStartRegister} is correct.
+void CodeGenerator::AssembleCodeStartRegisterCheck() {
+ __ ComputeCodeStartAddress(rbx);
+ __ cmpq(rbx, kJavaScriptCallCodeStartRegister);
+ __ Assert(equal, AbortReason::kWrongFunctionCodeStart);
+}
+
// Check if the code object is marked for deoptimization. If it is, then it
-// jumps to CompileLazyDeoptimizedCode builtin. In order to do this we need to:
-// 1. load the address of the current instruction;
-// 2. read from memory the word that contains that bit, which can be found in
+// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
+// to:
+// 1. read from memory the word that contains that bit, which can be found in
// the flags in the referenced {CodeDataContainer} object;
-// 3. test kMarkedForDeoptimizationBit in those flags; and
-// 4. if it is not zero then it jumps to the builtin.
+// 2. test kMarkedForDeoptimizationBit in those flags; and
+// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
- Label current;
- // Load effective address to get the address of the current instruction into
- // rcx.
- __ leaq(rcx, Operand(&current));
- __ bind(&current);
- int pc = __ pc_offset();
- int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc);
- __ movp(rcx, Operand(rcx, offset));
- __ testl(FieldOperand(rcx, CodeDataContainer::kKindSpecificFlagsOffset),
+ int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+ __ movp(rbx, Operand(kJavaScriptCallCodeStartRegister, offset));
+ __ testl(FieldOperand(rbx, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
Handle<Code> code = isolate()->builtins()->builtin_handle(
Builtins::kCompileLazyDeoptimizedCode);
__ j(not_zero, code, RelocInfo::CODE_TARGET);
}
-inline bool HasCallDescriptorFlag(Instruction* instr,
- CallDescriptor::Flag flag) {
- return MiscField::decode(instr->opcode()) & flag;
+void CodeGenerator::GenerateSpeculationPoison() {
+ // Set a mask which has all bits set in the normal case, but has all
+ // bits cleared if we are speculatively executing the wrong PC.
+ __ ComputeCodeStartAddress(rbx);
+ __ movp(kSpeculationPoisonRegister, Immediate(0));
+ __ cmpp(kJavaScriptCallCodeStartRegister, rbx);
+ __ movp(rbx, Immediate(-1));
+ __ cmovq(equal, kSpeculationPoisonRegister, rbx);
+}
+
+void CodeGenerator::AssembleRegisterArgumentPoisoning() {
+ __ andq(kJSFunctionRegister, kSpeculationPoisonRegister);
+ __ andq(kContextRegister, kSpeculationPoisonRegister);
+ __ andq(rsp, kSpeculationPoisonRegister);
}
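
GenerateSpeculationPoison derives that mask without branching: compare the code-start address this code computes for itself against the one the caller passed in kJavaScriptCallCodeStartRegister, then conditionally move -1 or 0. Equivalent logic:

    #include <cstdint>

    uint64_t ComputePoison(uintptr_t expected_code_start,
                           uintptr_t actual_code_start) {
      // The generated sequence uses cmovq rather than a branch, so the check
      // itself cannot be predicted around.
      return expected_code_start == actual_code_start ? ~uint64_t{0}
                                                      : uint64_t{0};
    }
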
// Assembles an instruction after register allocation, producing machine code.
@@ -697,6 +721,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTailCallAddress: {
CHECK(!HasImmediateInput(instr, 0));
Register reg = i.InputRegister(0);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister)) {
+ static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
+ DCHECK_EQ(rcx, reg);
+ }
if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
@@ -714,6 +742,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
__ Assert(equal, AbortReason::kWrongFunctionContext);
}
+ static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ movp(rcx, FieldOperand(func, JSFunction::kCodeOffset));
__ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(rcx);
@@ -1747,20 +1776,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
ASSEMBLE_MOVX(movsxbl);
__ AssertZeroExtended(i.OutputRegister());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movzxbl:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
ASSEMBLE_MOVX(movzxbl);
__ AssertZeroExtended(i.OutputRegister());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movsxbq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
ASSEMBLE_MOVX(movsxbq);
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movzxbq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
ASSEMBLE_MOVX(movzxbq);
__ AssertZeroExtended(i.OutputRegister());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movb: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
@@ -1771,17 +1804,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ movb(operand, i.InputRegister(index));
}
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64Movsxwl:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
ASSEMBLE_MOVX(movsxwl);
__ AssertZeroExtended(i.OutputRegister());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movzxwl:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
ASSEMBLE_MOVX(movzxwl);
__ AssertZeroExtended(i.OutputRegister());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movsxwq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
@@ -1791,6 +1827,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
ASSEMBLE_MOVX(movzxwq);
__ AssertZeroExtended(i.OutputRegister());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movw: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
@@ -1801,6 +1838,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ movw(operand, i.InputRegister(index));
}
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64Movl:
@@ -1825,10 +1863,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movl(operand, i.InputRegister(index));
}
}
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movsxlq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
ASSEMBLE_MOVX(movsxlq);
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
@@ -1843,6 +1883,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movq(operand, i.InputRegister(index));
}
}
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movss:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
@@ -2064,9 +2105,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64F32x4Splat: {
XMMRegister dst = i.OutputSimd128Register();
if (instr->InputAt(0)->IsFPRegister()) {
- __ Movss(dst, i.InputDoubleRegister(0));
+ __ movss(dst, i.InputDoubleRegister(0));
} else {
- __ Movss(dst, i.InputOperand(0));
+ __ movss(dst, i.InputOperand(0));
}
__ shufps(dst, dst, 0x0);
break;
@@ -2085,6 +2126,34 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ insertps(i.OutputSimd128Register(), i.InputDoubleRegister(2), select);
break;
}
+ case kX64F32x4Abs: {
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ if (dst == src) {
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psrld(kScratchDoubleReg, 1);
+ __ andps(i.OutputSimd128Register(), kScratchDoubleReg);
+ } else {
+ __ pcmpeqd(dst, dst);
+ __ psrld(dst, 1);
+ __ andps(dst, i.InputSimd128Register(0));
+ }
+ break;
+ }
+ case kX64F32x4Neg: {
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ if (dst == src) {
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ pslld(kScratchDoubleReg, 31);
+ __ xorps(i.OutputSimd128Register(), kScratchDoubleReg);
+ } else {
+ __ pcmpeqd(dst, dst);
+ __ pslld(dst, 31);
+ __ xorps(dst, i.InputSimd128Register(0));
+ }
+ break;
+ }
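
Both cases synthesize the constant in registers: pcmpeqd yields all-ones, then a per-lane shift leaves either 0x7fffffff (psrld by 1) or 0x80000000 (pslld by 31), avoiding a constant load from memory. Per-lane effect on the float bit patterns:

    #include <cstdint>

    uint32_t F32x4AbsLane(uint32_t lane_bits) { return lane_bits & 0x7fffffffu; }  // clear sign bit
    uint32_t F32x4NegLane(uint32_t lane_bits) { return lane_bits ^ 0x80000000u; }  // flip sign bit
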
case kX64F32x4RecipApprox: {
__ rcpps(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
@@ -2098,6 +2167,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ addps(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
+ case kX64F32x4AddHoriz: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ haddps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
case kX64F32x4Sub: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ subps(i.OutputSimd128Register(), i.InputSimd128Register(1));
@@ -2271,7 +2345,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister dst = i.OutputSimd128Register();
__ movd(dst, i.InputRegister(0));
__ pshuflw(dst, dst, 0x0);
- __ pshufhw(dst, dst, 0x0);
__ pshufd(dst, dst, 0x0);
break;
}
@@ -2571,77 +2644,77 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64StackCheck:
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
break;
- case kAtomicExchangeInt8: {
+ case kWord32AtomicExchangeInt8: {
__ xchgb(i.InputRegister(0), i.MemoryOperand(1));
__ movsxbl(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kAtomicExchangeUint8: {
+ case kWord32AtomicExchangeUint8: {
__ xchgb(i.InputRegister(0), i.MemoryOperand(1));
__ movzxbl(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kAtomicExchangeInt16: {
+ case kWord32AtomicExchangeInt16: {
__ xchgw(i.InputRegister(0), i.MemoryOperand(1));
__ movsxwl(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kAtomicExchangeUint16: {
+ case kWord32AtomicExchangeUint16: {
__ xchgw(i.InputRegister(0), i.MemoryOperand(1));
__ movzxwl(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kAtomicExchangeWord32: {
+ case kWord32AtomicExchangeWord32: {
__ xchgl(i.InputRegister(0), i.MemoryOperand(1));
break;
}
- case kAtomicCompareExchangeInt8: {
+ case kWord32AtomicCompareExchangeInt8: {
__ lock();
__ cmpxchgb(i.MemoryOperand(2), i.InputRegister(1));
__ movsxbl(rax, rax);
break;
}
- case kAtomicCompareExchangeUint8: {
+ case kWord32AtomicCompareExchangeUint8: {
__ lock();
__ cmpxchgb(i.MemoryOperand(2), i.InputRegister(1));
__ movzxbl(rax, rax);
break;
}
- case kAtomicCompareExchangeInt16: {
+ case kWord32AtomicCompareExchangeInt16: {
__ lock();
__ cmpxchgw(i.MemoryOperand(2), i.InputRegister(1));
__ movsxwl(rax, rax);
break;
}
- case kAtomicCompareExchangeUint16: {
+ case kWord32AtomicCompareExchangeUint16: {
__ lock();
__ cmpxchgw(i.MemoryOperand(2), i.InputRegister(1));
__ movzxwl(rax, rax);
break;
}
- case kAtomicCompareExchangeWord32: {
+ case kWord32AtomicCompareExchangeWord32: {
__ lock();
__ cmpxchgl(i.MemoryOperand(2), i.InputRegister(1));
break;
}
#define ATOMIC_BINOP_CASE(op, inst) \
- case kAtomic##op##Int8: \
+ case kWord32Atomic##op##Int8: \
ASSEMBLE_ATOMIC_BINOP(inst, movb, cmpxchgb); \
__ movsxbl(rax, rax); \
break; \
- case kAtomic##op##Uint8: \
+ case kWord32Atomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP(inst, movb, cmpxchgb); \
__ movzxbl(rax, rax); \
break; \
- case kAtomic##op##Int16: \
+ case kWord32Atomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP(inst, movw, cmpxchgw); \
__ movsxwl(rax, rax); \
break; \
- case kAtomic##op##Uint16: \
+ case kWord32Atomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP(inst, movw, cmpxchgw); \
__ movzxwl(rax, rax); \
break; \
- case kAtomic##op##Word32: \
+ case kWord32Atomic##op##Word32: \
ASSEMBLE_ATOMIC_BINOP(inst, movl, cmpxchgl); \
break;
ATOMIC_BINOP_CASE(Add, addl)
@@ -2650,14 +2723,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, orl)
ATOMIC_BINOP_CASE(Xor, xorl)
#undef ATOMIC_BINOP_CASE
- case kAtomicLoadInt8:
- case kAtomicLoadUint8:
- case kAtomicLoadInt16:
- case kAtomicLoadUint16:
- case kAtomicLoadWord32:
- case kAtomicStoreWord8:
- case kAtomicStoreWord16:
- case kAtomicStoreWord32:
+ case kWord32AtomicLoadInt8:
+ case kWord32AtomicLoadUint8:
+ case kWord32AtomicLoadInt16:
+ case kWord32AtomicLoadUint16:
+ case kWord32AtomicLoadWord32:
+ case kWord32AtomicStoreWord8:
+ case kWord32AtomicStoreWord16:
+ case kWord32AtomicStoreWord32:
UNREACHABLE(); // Won't be generated by instruction selector.
break;
}
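
For the binop cases with no single-instruction form, the ASSEMBLE_ATOMIC_BINOP helper used above boils down to the usual load / compute / lock-cmpxchg retry loop; a C++ rendering of that pattern (not the macro's literal expansion):

    #include <atomic>
    #include <cstdint>

    uint32_t AtomicAnd32(std::atomic<uint32_t>* mem, uint32_t operand) {
      uint32_t old = mem->load(std::memory_order_relaxed);
      while (!mem->compare_exchange_weak(old, old & operand,
                                         std::memory_order_seq_cst,
                                         std::memory_order_relaxed)) {
        // compare_exchange_weak refreshed 'old' with the current value; retry.
      }
      return old;  // wasm atomic binops yield the value before the update
    }
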
@@ -2700,6 +2773,19 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
UNREACHABLE();
}
+#undef ASSEMBLE_UNOP
+#undef ASSEMBLE_BINOP
+#undef ASSEMBLE_COMPARE
+#undef ASSEMBLE_MULT
+#undef ASSEMBLE_SHIFT
+#undef ASSEMBLE_MOVX
+#undef ASSEMBLE_SSE_BINOP
+#undef ASSEMBLE_SSE_UNOP
+#undef ASSEMBLE_AVX_BINOP
+#undef ASSEMBLE_IEEE754_BINOP
+#undef ASSEMBLE_IEEE754_UNOP
+#undef ASSEMBLE_ATOMIC_BINOP
+
} // namespace
// Assembles branches after this instruction.
@@ -2718,6 +2804,19 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ jmp(flabel, flabel_distance);
}
+void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
+ Instruction* instr) {
+ // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
+ if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
+ return;
+ }
+
+ condition = NegateFlagsCondition(condition);
+ __ movl(kScratchRegister, Immediate(0));
+ __ cmovq(FlagsConditionToCondition(condition), kSpeculationPoisonRegister,
+ kScratchRegister);
+}
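
The same mask is also refreshed at every conditional branch: a cmov on the negated condition selects zero, so on whichever arm the flags say should not be executing, the poison collapses and every subsequently poisoned load reads as zero. In scalar terms:

    #include <cstdint>

    uint64_t PoisonAfterBranch(uint64_t poison, bool condition_holds_on_this_arm) {
      // cmov on the negated condition in the generated code; no branch taken.
      return condition_holds_on_this_arm ? poison : uint64_t{0};
    }
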
+
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
Label::Distance flabel_distance =
@@ -2799,8 +2898,8 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ isolate()),
0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
- CallDescriptor* descriptor = gen_->linkage()->GetIncomingDescriptor();
- size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
+ auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
+ size_t pop_size = call_descriptor->StackParameterCount() * kPointerSize;
// Use rcx as a scratch register, we return anyways immediately.
__ Ret(static_cast<int>(pop_size), rcx);
} else {
@@ -2858,7 +2957,6 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
__ bind(&done);
}
-
void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
X64OperandConverter i(this, instr);
Register input = i.InputRegister(0);
@@ -2892,9 +2990,9 @@ static const int kQuadWordSize = 16;
} // namespace
void CodeGenerator::FinishFrame(Frame* frame) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
- const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+ const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
if (saves_fp != 0) {
frame->AlignSavedCalleeRegisterSlots();
if (saves_fp != 0) { // Save callee-saved XMM registers.
@@ -2903,7 +3001,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
(kQuadWordSize / kPointerSize));
}
}
- const RegList saves = descriptor->CalleeSavedRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
if (saves != 0) { // Save callee-saved registers.
int count = 0;
for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
@@ -2916,16 +3014,16 @@ void CodeGenerator::FinishFrame(Frame* frame) {
}
void CodeGenerator::AssembleConstructFrame() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
int pc_base = __ pc_offset();
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
__ pushq(rbp);
__ movq(rbp, rsp);
- } else if (descriptor->IsJSFunctionCall()) {
+ } else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
- if (descriptor->PushArgumentCount()) {
+ if (call_descriptor->PushArgumentCount()) {
__ pushq(kJavaScriptCallArgCountRegister);
}
} else {
@@ -2934,8 +3032,8 @@ void CodeGenerator::AssembleConstructFrame() {
unwinding_info_writer_.MarkFrameConstructed(pc_base);
}
- int shrink_slots =
- frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
+ int shrink_slots = frame()->GetTotalFrameSlotCount() -
+ call_descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -2948,10 +3046,11 @@ void CodeGenerator::AssembleConstructFrame() {
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
shrink_slots -= static_cast<int>(osr_helper()->UnoptimizedFrameSlots());
+ InitializePoisonForLoadsIfNeeded();
}
- const RegList saves = descriptor->CalleeSavedRegisters();
- const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
+ const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
if (shrink_slots > 0) {
if (info()->IsWasm() && shrink_slots > 128) {
@@ -3024,10 +3123,10 @@ void CodeGenerator::AssembleConstructFrame() {
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
// Restore registers.
- const RegList saves = descriptor->CalleeSavedRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
const int returns = frame()->GetReturnSlotCount();
if (returns != 0) {
@@ -3038,7 +3137,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
__ popq(Register::from_code(i));
}
}
- const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+ const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
if (saves_fp != 0) {
const uint32_t saves_fp_count = base::bits::CountPopulation(saves_fp);
const int stack_size = saves_fp_count * kQuadWordSize;
@@ -3058,11 +3157,11 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
// Might need rcx for scratch if pop_size is too big or if there is a variable
// pop count.
- DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & rcx.bit());
- DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & rdx.bit());
- size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
+ DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & rcx.bit());
+ DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & rdx.bit());
+ size_t pop_size = call_descriptor->StackParameterCount() * kPointerSize;
X64OperandConverter g(this, nullptr);
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
@@ -3097,147 +3196,159 @@ void CodeGenerator::FinishCode() {}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
X64OperandConverter g(this, nullptr);
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister()) {
- DCHECK(destination->IsRegister() || destination->IsStackSlot());
- Register src = g.ToRegister(source);
- if (destination->IsRegister()) {
- __ movq(g.ToRegister(destination), src);
- } else {
- __ movq(g.ToOperand(destination), src);
- }
- } else if (source->IsStackSlot()) {
- DCHECK(destination->IsRegister() || destination->IsStackSlot());
- Operand src = g.ToOperand(source);
- if (destination->IsRegister()) {
- Register dst = g.ToRegister(destination);
- __ movq(dst, src);
- } else {
- // Spill on demand to use a temporary register for memory-to-memory
- // moves.
- Register tmp = kScratchRegister;
- Operand dst = g.ToOperand(destination);
- __ movq(tmp, src);
- __ movq(dst, tmp);
- }
- } else if (source->IsConstant()) {
- ConstantOperand* constant_source = ConstantOperand::cast(source);
- Constant src = g.ToConstant(constant_source);
- if (destination->IsRegister() || destination->IsStackSlot()) {
- Register dst = destination->IsRegister() ? g.ToRegister(destination)
- : kScratchRegister;
- switch (src.type()) {
- case Constant::kInt32: {
- if (RelocInfo::IsWasmPtrReference(src.rmode())) {
- __ movq(dst, src.ToInt64(), src.rmode());
- } else {
- int32_t value = src.ToInt32();
- if (RelocInfo::IsWasmSizeReference(src.rmode())) {
- __ movl(dst, Immediate(value, src.rmode()));
- } else if (value == 0) {
- __ xorl(dst, dst);
- } else {
- __ movl(dst, Immediate(value));
- }
- }
- break;
- }
- case Constant::kInt64:
- if (RelocInfo::IsWasmPtrReference(src.rmode())) {
- __ movq(dst, src.ToInt64(), src.rmode());
- } else {
- DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
- __ Set(dst, src.ToInt64());
- }
- break;
- case Constant::kFloat32:
- __ MoveNumber(dst, src.ToFloat32());
- break;
- case Constant::kFloat64:
- __ MoveNumber(dst, src.ToFloat64().value());
- break;
- case Constant::kExternalReference:
- __ Move(dst, src.ToExternalReference());
- break;
- case Constant::kHeapObject: {
- Handle<HeapObject> src_object = src.ToHeapObject();
- Heap::RootListIndex index;
- if (IsMaterializableFromRoot(src_object, &index)) {
- __ LoadRoot(dst, index);
+ // Helper function to write the given constant to the dst register.
+ auto MoveConstantToRegister = [&](Register dst, Constant src) {
+ switch (src.type()) {
+ case Constant::kInt32: {
+ if (RelocInfo::IsWasmPtrReference(src.rmode())) {
+ __ movq(dst, src.ToInt64(), src.rmode());
+ } else {
+ int32_t value = src.ToInt32();
+ if (RelocInfo::IsWasmSizeReference(src.rmode())) {
+ __ movl(dst, Immediate(value, src.rmode()));
+ } else if (value == 0) {
+ __ xorl(dst, dst);
} else {
- __ Move(dst, src_object);
+ __ movl(dst, Immediate(value));
}
- break;
}
- case Constant::kRpoNumber:
- UNREACHABLE(); // TODO(dcarney): load of labels on x64.
- break;
+ break;
}
- if (destination->IsStackSlot()) {
- __ movq(g.ToOperand(destination), kScratchRegister);
+ case Constant::kInt64:
+ if (RelocInfo::IsWasmPtrReference(src.rmode())) {
+ __ movq(dst, src.ToInt64(), src.rmode());
+ } else {
+ DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
+ __ Set(dst, src.ToInt64());
+ }
+ break;
+ case Constant::kFloat32:
+ __ MoveNumber(dst, src.ToFloat32());
+ break;
+ case Constant::kFloat64:
+ __ MoveNumber(dst, src.ToFloat64().value());
+ break;
+ case Constant::kExternalReference:
+ __ Move(dst, src.ToExternalReference());
+ break;
+ case Constant::kHeapObject: {
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ Heap::RootListIndex index;
+ if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
+ } else {
+ __ Move(dst, src_object);
+ }
+ break;
}
- } else if (src.type() == Constant::kFloat32) {
- // TODO(turbofan): Can we do better here?
- uint32_t src_const = bit_cast<uint32_t>(src.ToFloat32());
- if (destination->IsFPRegister()) {
- __ Move(g.ToDoubleRegister(destination), src_const);
+ case Constant::kRpoNumber:
+ UNREACHABLE(); // TODO(dcarney): load of labels on x64.
+ break;
+ }
+ };
+ // Dispatch on the source and destination operand kinds.
+ switch (MoveType::InferMove(source, destination)) {
+ case MoveType::kRegisterToRegister:
+ if (source->IsRegister()) {
+ __ movq(g.ToRegister(destination), g.ToRegister(source));
} else {
- DCHECK(destination->IsFPStackSlot());
- Operand dst = g.ToOperand(destination);
- __ movl(dst, Immediate(src_const));
+ DCHECK(source->IsFPRegister());
+ __ Movapd(g.ToDoubleRegister(destination), g.ToDoubleRegister(source));
}
- } else {
- DCHECK_EQ(Constant::kFloat64, src.type());
- uint64_t src_const = src.ToFloat64().AsUint64();
- if (destination->IsFPRegister()) {
- __ Move(g.ToDoubleRegister(destination), src_const);
+ return;
+ case MoveType::kRegisterToStack: {
+ Operand dst = g.ToOperand(destination);
+ if (source->IsRegister()) {
+ __ movq(dst, g.ToRegister(source));
+ } else {
+ DCHECK(source->IsFPRegister());
+ XMMRegister src = g.ToDoubleRegister(source);
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep != MachineRepresentation::kSimd128) {
+ __ Movsd(dst, src);
+ } else {
+ __ Movups(dst, src);
+ }
+ }
+ return;
+ }
+ case MoveType::kStackToRegister: {
+ Operand src = g.ToOperand(source);
+ if (source->IsStackSlot()) {
+ __ movq(g.ToRegister(destination), src);
} else {
- DCHECK(destination->IsFPStackSlot());
- __ movq(kScratchRegister, src_const);
- __ movq(g.ToOperand(destination), kScratchRegister);
+ DCHECK(source->IsFPStackSlot());
+ XMMRegister dst = g.ToDoubleRegister(destination);
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep != MachineRepresentation::kSimd128) {
+ __ Movsd(dst, src);
+ } else {
+ __ Movups(dst, src);
+ }
}
+ return;
}
- } else if (source->IsFPRegister()) {
- XMMRegister src = g.ToDoubleRegister(source);
- if (destination->IsFPRegister()) {
- XMMRegister dst = g.ToDoubleRegister(destination);
- __ Movapd(dst, src);
- } else {
- DCHECK(destination->IsFPStackSlot());
+ case MoveType::kStackToStack: {
+ Operand src = g.ToOperand(source);
Operand dst = g.ToOperand(destination);
- MachineRepresentation rep =
- LocationOperand::cast(source)->representation();
- if (rep != MachineRepresentation::kSimd128) {
- __ Movsd(dst, src);
- } else {
- __ Movups(dst, src);
+ if (source->IsStackSlot()) {
+ // Spill on demand to use a temporary register for memory-to-memory
+ // moves.
+ __ movq(kScratchRegister, src);
+ __ movq(dst, kScratchRegister);
+ } else {
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep != MachineRepresentation::kSimd128) {
+ __ Movsd(kScratchDoubleReg, src);
+ __ Movsd(dst, kScratchDoubleReg);
+ } else {
+ DCHECK(source->IsSimd128StackSlot());
+ __ Movups(kScratchDoubleReg, src);
+ __ Movups(dst, kScratchDoubleReg);
+ }
}
+ return;
}
- } else if (source->IsFPStackSlot()) {
- DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
- Operand src = g.ToOperand(source);
- MachineRepresentation rep = LocationOperand::cast(source)->representation();
- if (destination->IsFPRegister()) {
- XMMRegister dst = g.ToDoubleRegister(destination);
- if (rep != MachineRepresentation::kSimd128) {
- __ Movsd(dst, src);
+ case MoveType::kConstantToRegister: {
+ Constant src = g.ToConstant(source);
+ if (destination->IsRegister()) {
+ MoveConstantToRegister(g.ToRegister(destination), src);
} else {
- __ Movups(dst, src);
+ DCHECK(destination->IsFPRegister());
+ XMMRegister dst = g.ToDoubleRegister(destination);
+ if (src.type() == Constant::kFloat32) {
+ // TODO(turbofan): Can we do better here?
+ __ Move(dst, bit_cast<uint32_t>(src.ToFloat32()));
+ } else {
+ DCHECK_EQ(src.type(), Constant::kFloat64);
+ __ Move(dst, src.ToFloat64().AsUint64());
+ }
}
- } else {
+ return;
+ }
+ case MoveType::kConstantToStack: {
+ Constant src = g.ToConstant(source);
Operand dst = g.ToOperand(destination);
- if (rep != MachineRepresentation::kSimd128) {
- __ Movsd(kScratchDoubleReg, src);
- __ Movsd(dst, kScratchDoubleReg);
+ if (destination->IsStackSlot()) {
+ MoveConstantToRegister(kScratchRegister, src);
+ __ movq(dst, kScratchRegister);
} else {
- __ Movups(kScratchDoubleReg, src);
- __ Movups(dst, kScratchDoubleReg);
+ DCHECK(destination->IsFPStackSlot());
+ if (src.type() == Constant::kFloat32) {
+ __ movl(dst, Immediate(bit_cast<uint32_t>(src.ToFloat32())));
+ } else {
+ DCHECK_EQ(src.type(), Constant::kFloat64);
+ __ movq(kScratchRegister, src.ToFloat64().AsUint64());
+ __ movq(dst, kScratchRegister);
+ }
}
+ return;
}
- } else {
- UNREACHABLE();
}
+ UNREACHABLE();
}
@@ -3246,88 +3357,95 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
X64OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
- if (source->IsRegister() && destination->IsRegister()) {
- // Register-register.
- Register src = g.ToRegister(source);
- Register dst = g.ToRegister(destination);
- __ movq(kScratchRegister, src);
- __ movq(src, dst);
- __ movq(dst, kScratchRegister);
- } else if (source->IsRegister() && destination->IsStackSlot()) {
- Register src = g.ToRegister(source);
- __ pushq(src);
- frame_access_state()->IncreaseSPDelta(1);
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kPointerSize);
- Operand dst = g.ToOperand(destination);
- __ movq(src, dst);
- frame_access_state()->IncreaseSPDelta(-1);
- dst = g.ToOperand(destination);
- __ popq(dst);
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- -kPointerSize);
- } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
- (source->IsFPStackSlot() && destination->IsFPStackSlot())) {
- // Memory-memory.
- Operand src = g.ToOperand(source);
- Operand dst = g.ToOperand(destination);
- MachineRepresentation rep = LocationOperand::cast(source)->representation();
- if (rep != MachineRepresentation::kSimd128) {
- Register tmp = kScratchRegister;
- __ movq(tmp, dst);
- __ pushq(src); // Then use stack to copy src to destination.
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kPointerSize);
- __ popq(dst);
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- -kPointerSize);
- __ movq(src, tmp);
- } else {
- // Without AVX, misaligned reads and writes will trap. Move using the
- // stack, in two parts.
- __ movups(kScratchDoubleReg, dst); // Save dst in scratch register.
- __ pushq(src); // Then use stack to copy src to destination.
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kPointerSize);
- __ popq(dst);
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- -kPointerSize);
- __ pushq(g.ToOperand(source, kPointerSize));
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kPointerSize);
- __ popq(g.ToOperand(destination, kPointerSize));
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- -kPointerSize);
- __ movups(src, kScratchDoubleReg);
- }
- } else if (source->IsFPRegister() && destination->IsFPRegister()) {
- // XMM register-register swap.
- XMMRegister src = g.ToDoubleRegister(source);
- XMMRegister dst = g.ToDoubleRegister(destination);
- __ Movapd(kScratchDoubleReg, src);
- __ Movapd(src, dst);
- __ Movapd(dst, kScratchDoubleReg);
- } else if (source->IsFPRegister() && destination->IsFPStackSlot()) {
- // XMM register-memory swap.
- XMMRegister src = g.ToDoubleRegister(source);
- Operand dst = g.ToOperand(destination);
- MachineRepresentation rep = LocationOperand::cast(source)->representation();
- if (rep != MachineRepresentation::kSimd128) {
- __ Movsd(kScratchDoubleReg, src);
- __ Movsd(src, dst);
- __ Movsd(dst, kScratchDoubleReg);
- } else {
- __ Movups(kScratchDoubleReg, src);
- __ Movups(src, dst);
- __ Movups(dst, kScratchDoubleReg);
+ switch (MoveType::InferSwap(source, destination)) {
+ case MoveType::kRegisterToRegister: {
+ if (source->IsRegister()) {
+ Register src = g.ToRegister(source);
+ Register dst = g.ToRegister(destination);
+ __ movq(kScratchRegister, src);
+ __ movq(src, dst);
+ __ movq(dst, kScratchRegister);
+ } else {
+ DCHECK(source->IsFPRegister());
+ XMMRegister src = g.ToDoubleRegister(source);
+ XMMRegister dst = g.ToDoubleRegister(destination);
+ __ Movapd(kScratchDoubleReg, src);
+ __ Movapd(src, dst);
+ __ Movapd(dst, kScratchDoubleReg);
+ }
+ return;
+ }
+ case MoveType::kRegisterToStack: {
+ if (source->IsRegister()) {
+ Register src = g.ToRegister(source);
+ __ pushq(src);
+ frame_access_state()->IncreaseSPDelta(1);
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kPointerSize);
+ __ movq(src, g.ToOperand(destination));
+ frame_access_state()->IncreaseSPDelta(-1);
+ __ popq(g.ToOperand(destination));
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ -kPointerSize);
+ } else {
+ DCHECK(source->IsFPRegister());
+ XMMRegister src = g.ToDoubleRegister(source);
+ Operand dst = g.ToOperand(destination);
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep != MachineRepresentation::kSimd128) {
+ __ Movsd(kScratchDoubleReg, src);
+ __ Movsd(src, dst);
+ __ Movsd(dst, kScratchDoubleReg);
+ } else {
+ __ Movups(kScratchDoubleReg, src);
+ __ Movups(src, dst);
+ __ Movups(dst, kScratchDoubleReg);
+ }
+ }
+ return;
}
- } else {
- // No other combinations are possible.
- UNREACHABLE();
+ case MoveType::kStackToStack: {
+ Operand src = g.ToOperand(source);
+ Operand dst = g.ToOperand(destination);
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep != MachineRepresentation::kSimd128) {
+ Register tmp = kScratchRegister;
+ __ movq(tmp, dst);
+ __ pushq(src); // Then use stack to copy src to destination.
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kPointerSize);
+ __ popq(dst);
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ -kPointerSize);
+ __ movq(src, tmp);
+ } else {
+ // Without AVX, misaligned reads and writes will trap. Move using the
+ // stack, in two parts.
+ __ movups(kScratchDoubleReg, dst); // Save dst in scratch register.
+ __ pushq(src); // Then use stack to copy src to destination.
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kPointerSize);
+ __ popq(dst);
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ -kPointerSize);
+ __ pushq(g.ToOperand(source, kPointerSize));
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kPointerSize);
+ __ popq(g.ToOperand(destination, kPointerSize));
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ -kPointerSize);
+ __ movups(src, kScratchDoubleReg);
+ }
+ return;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
}
-
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
for (size_t index = 0; index < target_count; ++index) {
__ dq(targets[index]);
diff --git a/deps/v8/src/compiler/x64/instruction-codes-x64.h b/deps/v8/src/compiler/x64/instruction-codes-x64.h
index 6d9bc6f820..e758072050 100644
--- a/deps/v8/src/compiler/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/x64/instruction-codes-x64.h
@@ -149,9 +149,12 @@ namespace compiler {
V(X64F32x4Splat) \
V(X64F32x4ExtractLane) \
V(X64F32x4ReplaceLane) \
+ V(X64F32x4Abs) \
+ V(X64F32x4Neg) \
V(X64F32x4RecipApprox) \
V(X64F32x4RecipSqrtApprox) \
V(X64F32x4Add) \
+ V(X64F32x4AddHoriz) \
V(X64F32x4Sub) \
V(X64F32x4Mul) \
V(X64F32x4Min) \
@@ -266,8 +269,6 @@ namespace compiler {
V(M8I) /* [ %r2*8 + K] */ \
V(Root) /* [%root + K] */
-enum X64MemoryProtection { kUnprotected = 0, kProtected = 1 };
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
index c16fee5861..1d0e182303 100644
--- a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
@@ -128,7 +128,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F32x4ReplaceLane:
case kX64F32x4RecipApprox:
case kX64F32x4RecipSqrtApprox:
+ case kX64F32x4Abs:
+ case kX64F32x4Neg:
case kX64F32x4Add:
+ case kX64F32x4AddHoriz:
case kX64F32x4Sub:
case kX64F32x4Mul:
case kX64F32x4Min:
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index a0f14c687c..25dc5e9658 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -307,11 +307,16 @@ void InstructionSelector::VisitLoad(Node* node) {
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(mode);
if (node->opcode() == IrOpcode::kProtectedLoad) {
- code |= MiscField::encode(X64MemoryProtection::kProtected);
+ code |= MiscField::encode(kMemoryAccessProtected);
+ } else if (node->opcode() == IrOpcode::kPoisonedLoad) {
+ CHECK_EQ(load_poisoning_, LoadPoisoning::kDoPoison);
+ code |= MiscField::encode(kMemoryAccessPoisoned);
}
Emit(code, 1, outputs, input_count, inputs);
}
+void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+
void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); }
void InstructionSelector::VisitStore(Node* node) {
@@ -391,7 +396,7 @@ void InstructionSelector::VisitProtectedStore(Node* node) {
AddressingMode addressing_mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
- MiscField::encode(X64MemoryProtection::kProtected);
+ MiscField::encode(kMemoryAccessProtected);
InstructionOperand value_operand =
g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
inputs[input_count++] = value_operand;
@@ -1139,7 +1144,8 @@ bool ZeroExtendsWord32ToWord64(Node* node) {
return false;
}
}
- case IrOpcode::kLoad: {
+ case IrOpcode::kLoad:
+ case IrOpcode::kPoisonedLoad: {
// The movzxbl/movsxbl/movzxwl/movsxwl/movl operations implicitly
// zero-extend to 64-bit on x64, so the zero-extension is a no-op.
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
@@ -1248,7 +1254,12 @@ void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
V(BitcastInt32ToFloat32, kX64BitcastIF) \
V(BitcastInt64ToFloat64, kX64BitcastLD) \
V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32) \
- V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32)
+ V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32) \
+ V(SignExtendWord8ToInt32, kX64Movsxbl) \
+ V(SignExtendWord16ToInt32, kX64Movsxwl) \
+ V(SignExtendWord8ToInt64, kX64Movsxbq) \
+ V(SignExtendWord16ToInt64, kX64Movsxwq) \
+ V(SignExtendWord32ToInt64, kX64Movsxlq)
#define RR_OP_LIST(V) \
V(Float32RoundDown, kSSEFloat32Round | MiscField::encode(kRoundDown)) \
@@ -1425,14 +1436,14 @@ void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
}
void InstructionSelector::EmitPrepareArguments(
- ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
Node* node) {
X64OperandGenerator g(this);
// Prepare for C function call.
- if (descriptor->IsCFunctionCall()) {
- Emit(kArchPrepareCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
+ if (call_descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
+ call_descriptor->ParameterCount())),
0, nullptr, 0, nullptr);
// Poke any stack arguments.
@@ -1477,9 +1488,9 @@ void InstructionSelector::EmitPrepareArguments(
}
}
-void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
- const CallDescriptor* descriptor,
- Node* node) {
+void InstructionSelector::EmitPrepareResults(
+ ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
+ Node* node) {
X64OperandGenerator g(this);
int reverse_slot = 0;
@@ -1488,7 +1499,7 @@ void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
reverse_slot += output.location.GetSizeInPointers();
// Skip any alignment holes in nodes.
if (output.node == nullptr) continue;
- DCHECK(!descriptor->IsCFunctionCall());
+ DCHECK(!call_descriptor->IsCFunctionCall());
if (output.location.GetType() == MachineType::Float32()) {
MarkAsFloat32(output.node);
} else if (output.location.GetType() == MachineType::Float64()) {
@@ -1733,19 +1744,9 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
// Compare(Load(js_stack_limit), LoadStackPointer)
if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
InstructionCode opcode = cont->Encode(kX64StackCheck);
- if (cont->IsBranch()) {
- selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
- g.Label(cont->false_block()));
- } else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->kind(),
- cont->reason(), cont->feedback(),
- cont->frame_state());
- } else if (cont->IsSet()) {
- selector->Emit(opcode, g.DefineAsRegister(cont->result()));
- } else {
- DCHECK(cont->IsTrap());
- selector->Emit(opcode, g.NoOutput(), g.UseImmediate(cont->trap_id()));
- }
+ CHECK(cont->IsBranch());
+ selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
+ g.Label(cont->false_block()));
return;
}
}
@@ -1782,12 +1783,13 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
VisitCompare(selector, opcode, right, left, cont, false);
}
+} // namespace
+
// Shared routine for word comparison against zero.
-void VisitWordCompareZero(InstructionSelector* selector, Node* user,
- Node* value, FlagsContinuation* cont) {
+void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
+ FlagsContinuation* cont) {
// Try to combine with comparisons against 0 by simply inverting the branch.
- while (value->opcode() == IrOpcode::kWord32Equal &&
- selector->CanCover(user, value)) {
+ while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
Int32BinopMatcher m(value);
if (!m.right().Is(0)) break;
@@ -1796,23 +1798,23 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
cont->Negate();
}
- if (selector->CanCover(user, value)) {
+ if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kWord32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitWordCompare(selector, value, kX64Cmp32, cont);
+ return VisitWordCompare(this, value, kX64Cmp32, cont);
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWordCompare(selector, value, kX64Cmp32, cont);
+ return VisitWordCompare(this, value, kX64Cmp32, cont);
case IrOpcode::kInt32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWordCompare(selector, value, kX64Cmp32, cont);
+ return VisitWordCompare(this, value, kX64Cmp32, cont);
case IrOpcode::kUint32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWordCompare(selector, value, kX64Cmp32, cont);
+ return VisitWordCompare(this, value, kX64Cmp32, cont);
case IrOpcode::kUint32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWordCompare(selector, value, kX64Cmp32, cont);
+ return VisitWordCompare(this, value, kX64Cmp32, cont);
case IrOpcode::kWord64Equal: {
cont->OverwriteAndNegateIfEqual(kEqual);
Int64BinopMatcher m(value);
@@ -1820,44 +1822,44 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// Try to combine the branch with a comparison.
Node* const user = m.node();
Node* const value = m.left().node();
- if (selector->CanCover(user, value)) {
+ if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kInt64Sub:
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kWord64And:
- return VisitWordCompare(selector, value, kX64Test, cont);
+ return VisitWordCompare(this, value, kX64Test, cont);
default:
break;
}
}
- return VisitCompareZero(selector, value, kX64Cmp, cont);
+ return VisitCompareZero(this, value, kX64Cmp, cont);
}
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
}
case IrOpcode::kInt64LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kInt64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kUint64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kUint64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kFloat32Equal:
cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat64Equal:
cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThan: {
Float64BinopMatcher m(value);
if (m.left().Is(0.0) && m.right().IsFloat64Abs()) {
@@ -1871,16 +1873,16 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// avoids the costly Float64Abs.
cont->OverwriteAndNegateIfEqual(kNotEqual);
InstructionCode const opcode =
- selector->IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
- return VisitCompare(selector, opcode, m.left().node(),
+ IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
+ return VisitCompare(this, opcode, m.left().node(),
m.right().InputAt(0), cont, false);
}
cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
}
case IrOpcode::kFloat64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
@@ -1892,23 +1894,23 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (result == nullptr || selector->IsDefined(result)) {
+ if (result == nullptr || IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kX64Add32, cont);
+ return VisitBinop(this, node, kX64Add32, cont);
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kX64Sub32, cont);
+ return VisitBinop(this, node, kX64Sub32, cont);
case IrOpcode::kInt32MulWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kX64Imul32, cont);
+ return VisitBinop(this, node, kX64Imul32, cont);
case IrOpcode::kInt64AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kX64Add, cont);
+ return VisitBinop(this, node, kX64Add, cont);
case IrOpcode::kInt64SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kX64Sub, cont);
+ return VisitBinop(this, node, kX64Sub, cont);
default:
break;
}
@@ -1916,51 +1918,16 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
}
break;
case IrOpcode::kInt32Sub:
- return VisitWordCompare(selector, value, kX64Cmp32, cont);
+ return VisitWordCompare(this, value, kX64Cmp32, cont);
case IrOpcode::kWord32And:
- return VisitWordCompare(selector, value, kX64Test32, cont);
+ return VisitWordCompare(this, value, kX64Test32, cont);
default:
break;
}
}
// Branch could not be combined with a compare, emit compare against 0.
- VisitCompareZero(selector, value, kX64Cmp32, cont);
-}
-
-} // namespace
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
- BasicBlock* fbranch) {
- FlagsContinuation cont(kNotEqual, tbranch, fbranch);
- VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapUnless(Node* node,
- Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+ VisitCompareZero(this, value, kX64Cmp32, cont);
}
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
@@ -1968,27 +1935,30 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
- static const size_t kMaxTableSwitchValueRange = 2 << 16;
- size_t table_space_cost = 4 + sw.value_range;
- size_t table_time_cost = 3;
- size_t lookup_space_cost = 3 + 2 * sw.case_count;
- size_t lookup_time_cost = sw.case_count;
- if (sw.case_count > 4 &&
- table_space_cost + 3 * table_time_cost <=
- lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min() &&
- sw.value_range <= kMaxTableSwitchValueRange) {
- InstructionOperand index_operand = g.TempRegister();
- if (sw.min_value) {
- // The leal automatically zero extends, so result is a valid 64-bit index.
- Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI), index_operand,
- value_operand, g.TempImmediate(-sw.min_value));
- } else {
- // Zero extend, because we use it as 64-bit index into the jump table.
- Emit(kX64Movl, index_operand, value_operand);
+ if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
+ size_t table_space_cost = 4 + sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 3 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 4 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
+ InstructionOperand index_operand = g.TempRegister();
+ if (sw.min_value) {
+ // The leal automatically zero extends, so the result is a valid 64-bit
+ // index.
+ Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI), index_operand,
+ value_operand, g.TempImmediate(-sw.min_value));
+ } else {
+ // Zero extend, because we use it as 64-bit index into the jump table.
+ Emit(kX64Movl, index_operand, value_operand);
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
}
- // Generate a table lookup.
- return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
@@ -2001,7 +1971,7 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int32BinopMatcher m(user);
if (m.right().Is(0)) {
- return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
+ return VisitWordCompareZero(m.node(), m.left().node(), &cont);
}
VisitWordCompare(this, node, kX64Cmp32, &cont);
}
@@ -2182,7 +2152,7 @@ void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
g.UseRegister(node->InputAt(0)));
}
-void InstructionSelector::VisitAtomicLoad(Node* node) {
+void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
load_rep.representation() == MachineRepresentation::kWord16 ||
@@ -2191,7 +2161,7 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
VisitLoad(node);
}
-void InstructionSelector::VisitAtomicStore(Node* node) {
+void InstructionSelector::VisitWord32AtomicStore(Node* node) {
X64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2201,13 +2171,13 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kAtomicExchangeInt8;
+ opcode = kWord32AtomicExchangeInt8;
break;
case MachineRepresentation::kWord16:
- opcode = kAtomicExchangeInt16;
+ opcode = kWord32AtomicExchangeInt16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicExchangeWord32;
+ opcode = kWord32AtomicExchangeWord32;
break;
default:
UNREACHABLE();
@@ -2229,7 +2199,7 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs);
}
-void InstructionSelector::VisitAtomicExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
X64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2238,15 +2208,15 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
MachineType type = AtomicOpRepresentationOf(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
- opcode = kAtomicExchangeInt8;
+ opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicExchangeUint8;
+ opcode = kWord32AtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicExchangeInt16;
+ opcode = kWord32AtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicExchangeUint16;
+ opcode = kWord32AtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicExchangeWord32;
+ opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -2269,7 +2239,7 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
Emit(code, 1, outputs, input_count, inputs);
}
-void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
X64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2279,15 +2249,15 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
MachineType type = AtomicOpRepresentationOf(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
- opcode = kAtomicCompareExchangeInt8;
+ opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicCompareExchangeUint8;
+ opcode = kWord32AtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicCompareExchangeInt16;
+ opcode = kWord32AtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicCompareExchangeUint16;
+ opcode = kWord32AtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicCompareExchangeWord32;
+ opcode = kWord32AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -2355,11 +2325,12 @@ void InstructionSelector::VisitAtomicBinaryOperation(
Emit(code, 1, outputs, input_count, inputs, 1, temp);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitAtomic##op(Node* node) { \
- VisitAtomicBinaryOperation(node, kAtomic##op##Int8, kAtomic##op##Uint8, \
- kAtomic##op##Int16, kAtomic##op##Uint16, \
- kAtomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitAtomicBinaryOperation( \
+ node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
+ kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
+ kWord32Atomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2376,6 +2347,7 @@ VISIT_ATOMIC_BINOP(Xor)
#define SIMD_BINOP_LIST(V) \
V(F32x4Add) \
+ V(F32x4AddHoriz) \
V(F32x4Sub) \
V(F32x4Mul) \
V(F32x4Min) \
@@ -2437,6 +2409,8 @@ VISIT_ATOMIC_BINOP(Xor)
V(S128Xor)
#define SIMD_UNOP_LIST(V) \
+ V(F32x4Abs) \
+ V(F32x4Neg) \
V(F32x4RecipApprox) \
V(F32x4RecipSqrtApprox) \
V(I32x4Neg) \
@@ -2565,6 +2539,9 @@ InstructionSelector::AlignmentRequirements() {
FullUnalignedAccessSupport();
}
+// static
+bool InstructionSelector::SupportsSpeculationPoisoning() { return true; }
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/x64/unwinding-info-writer-x64.h b/deps/v8/src/compiler/x64/unwinding-info-writer-x64.h
index 8bb5903e54..e1c6000d4f 100644
--- a/deps/v8/src/compiler/x64/unwinding-info-writer-x64.h
+++ b/deps/v8/src/compiler/x64/unwinding-info-writer-x64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_X64_UNWINDING_INFO_WRITER_H_
-#define V8_COMPILER_X64_UNWINDING_INFO_WRITER_H_
+#ifndef V8_COMPILER_X64_UNWINDING_INFO_WRITER_X64_H_
+#define V8_COMPILER_X64_UNWINDING_INFO_WRITER_X64_H_
#include "src/eh-frame.h"
@@ -76,4 +76,4 @@ class UnwindingInfoWriter {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_COMPILER_X64_UNWINDING_INFO_WRITER_X64_H_
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index c1bca7557e..02337fb456 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -32,59 +32,41 @@ enum ContextLookupFlags {
// must always be allocated via Heap::AllocateContext() or
// Factory::NewContext.
-#define NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
- V(ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX, JSFunction, \
- async_function_await_caught) \
- V(ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX, JSFunction, \
- async_function_await_uncaught) \
- V(ASYNC_FUNCTION_PROMISE_CREATE_INDEX, JSFunction, \
- async_function_promise_create) \
- V(ASYNC_FUNCTION_PROMISE_RELEASE_INDEX, JSFunction, \
- async_function_promise_release) \
- V(IS_ARRAYLIKE, JSFunction, is_arraylike) \
- V(GENERATOR_NEXT_INTERNAL, JSFunction, generator_next_internal) \
- V(MAKE_ERROR_INDEX, JSFunction, make_error) \
- V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error) \
- V(MAKE_SYNTAX_ERROR_INDEX, JSFunction, make_syntax_error) \
- V(MAKE_TYPE_ERROR_INDEX, JSFunction, make_type_error) \
- V(MAKE_URI_ERROR_INDEX, JSFunction, make_uri_error) \
- V(OBJECT_CREATE, JSFunction, object_create) \
- V(OBJECT_DEFINE_PROPERTIES, JSFunction, object_define_properties) \
- V(OBJECT_DEFINE_PROPERTY, JSFunction, object_define_property) \
- V(OBJECT_GET_PROTOTYPE_OF, JSFunction, object_get_prototype_of) \
- V(OBJECT_IS_EXTENSIBLE, JSFunction, object_is_extensible) \
- V(OBJECT_IS_FROZEN, JSFunction, object_is_frozen) \
- V(OBJECT_IS_SEALED, JSFunction, object_is_sealed) \
- V(OBJECT_KEYS, JSFunction, object_keys) \
- V(REGEXP_INTERNAL_MATCH, JSFunction, regexp_internal_match) \
- V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply) \
- V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct) \
- V(REFLECT_DEFINE_PROPERTY_INDEX, JSFunction, reflect_define_property) \
- V(REFLECT_DELETE_PROPERTY_INDEX, JSFunction, reflect_delete_property) \
- V(SPREAD_ARGUMENTS_INDEX, JSFunction, spread_arguments) \
- V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable) \
- V(TYPED_ARRAY_CONSTRUCT_BY_ARRAY_BUFFER_INDEX, JSFunction, \
- typed_array_construct_by_array_buffer) \
- V(TYPED_ARRAY_CONSTRUCT_BY_ARRAY_LIKE_INDEX, JSFunction, \
- typed_array_construct_by_array_like) \
- V(TYPED_ARRAY_CONSTRUCT_BY_LENGTH_INDEX, JSFunction, \
- typed_array_construct_by_length) \
- V(MATH_FLOOR_INDEX, JSFunction, math_floor) \
- V(MATH_POW_INDEX, JSFunction, math_pow) \
- V(NEW_PROMISE_CAPABILITY_INDEX, JSFunction, new_promise_capability) \
- V(PROMISE_INTERNAL_CONSTRUCTOR_INDEX, JSFunction, \
- promise_internal_constructor) \
- V(PROMISE_INTERNAL_REJECT_INDEX, JSFunction, promise_internal_reject) \
- V(IS_PROMISE_INDEX, JSFunction, is_promise) \
- V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve) \
- V(PROMISE_THEN_INDEX, JSFunction, promise_then) \
- V(PROMISE_HANDLE_INDEX, JSFunction, promise_handle) \
- V(PROMISE_HANDLE_REJECT_INDEX, JSFunction, promise_handle_reject) \
- V(ASYNC_GENERATOR_AWAIT_CAUGHT, JSFunction, async_generator_await_caught) \
- V(ASYNC_GENERATOR_AWAIT_UNCAUGHT, JSFunction, async_generator_await_uncaught)
+#define NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
+ V(ASYNC_FUNCTION_PROMISE_CREATE_INDEX, JSFunction, \
+ async_function_promise_create) \
+ V(ASYNC_FUNCTION_PROMISE_RELEASE_INDEX, JSFunction, \
+ async_function_promise_release) \
+ V(IS_ARRAYLIKE, JSFunction, is_arraylike) \
+ V(GENERATOR_NEXT_INTERNAL, JSFunction, generator_next_internal) \
+ V(MAKE_ERROR_INDEX, JSFunction, make_error) \
+ V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error) \
+ V(MAKE_SYNTAX_ERROR_INDEX, JSFunction, make_syntax_error) \
+ V(MAKE_TYPE_ERROR_INDEX, JSFunction, make_type_error) \
+ V(MAKE_URI_ERROR_INDEX, JSFunction, make_uri_error) \
+ V(OBJECT_CREATE, JSFunction, object_create) \
+ V(OBJECT_DEFINE_PROPERTIES, JSFunction, object_define_properties) \
+ V(OBJECT_DEFINE_PROPERTY, JSFunction, object_define_property) \
+ V(OBJECT_GET_PROTOTYPE_OF, JSFunction, object_get_prototype_of) \
+ V(OBJECT_IS_EXTENSIBLE, JSFunction, object_is_extensible) \
+ V(OBJECT_IS_FROZEN, JSFunction, object_is_frozen) \
+ V(OBJECT_IS_SEALED, JSFunction, object_is_sealed) \
+ V(OBJECT_KEYS, JSFunction, object_keys) \
+ V(REGEXP_INTERNAL_MATCH, JSFunction, regexp_internal_match) \
+ V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply) \
+ V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct) \
+ V(REFLECT_DEFINE_PROPERTY_INDEX, JSFunction, reflect_define_property) \
+ V(REFLECT_DELETE_PROPERTY_INDEX, JSFunction, reflect_delete_property) \
+ V(SPREAD_ARGUMENTS_INDEX, JSFunction, spread_arguments) \
+ V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable) \
+ V(MATH_FLOOR_INDEX, JSFunction, math_floor) \
+ V(MATH_POW_INDEX, JSFunction, math_pow) \
+ V(PROMISE_INTERNAL_CONSTRUCTOR_INDEX, JSFunction, \
+ promise_internal_constructor) \
+ V(IS_PROMISE_INDEX, JSFunction, is_promise) \
+ V(PROMISE_THEN_INDEX, JSFunction, promise_then)
#define NATIVE_CONTEXT_IMPORTED_FIELDS(V) \
- V(ARRAY_CONCAT_INDEX, JSFunction, array_concat) \
V(ARRAY_POP_INDEX, JSFunction, array_pop) \
V(ARRAY_PUSH_INDEX, JSFunction, array_push) \
V(ARRAY_SHIFT_INDEX, JSFunction, array_shift) \
@@ -95,7 +77,6 @@ enum ContextLookupFlags {
V(ARRAY_FOR_EACH_ITERATOR_INDEX, JSFunction, array_for_each_iterator) \
V(ARRAY_KEYS_ITERATOR_INDEX, JSFunction, array_keys_iterator) \
V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator) \
- V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
V(ERROR_FUNCTION_INDEX, JSFunction, error_function) \
V(ERROR_TO_STRING, JSFunction, error_to_string) \
V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function) \
@@ -121,8 +102,13 @@ enum ContextLookupFlags {
V(WASM_COMPILE_ERROR_FUNCTION_INDEX, JSFunction, \
wasm_compile_error_function) \
V(WASM_LINK_ERROR_FUNCTION_INDEX, JSFunction, wasm_link_error_function) \
- V(WASM_RUNTIME_ERROR_FUNCTION_INDEX, JSFunction, wasm_runtime_error_function)
+ V(WASM_RUNTIME_ERROR_FUNCTION_INDEX, JSFunction, \
+ wasm_runtime_error_function) \
+ V(WEAKMAP_SET_INDEX, JSFunction, weakmap_set) \
+ V(WEAKSET_ADD_INDEX, JSFunction, weakset_add)
+// If you add something here, also add it to ARRAY_ITERATOR_LIST in
+// bootstrapper.cc.
#define NATIVE_CONTEXT_JS_ARRAY_ITERATOR_MAPS(V) \
V(TYPED_ARRAY_KEY_ITERATOR_MAP_INDEX, Map, typed_array_key_iterator_map) \
V(FAST_ARRAY_KEY_ITERATOR_MAP_INDEX, Map, fast_array_key_iterator_map) \
@@ -146,6 +132,10 @@ enum ContextLookupFlags {
float64_array_key_value_iterator_map) \
V(UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
uint8_clamped_array_key_value_iterator_map) \
+ V(BIGUINT64_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ biguint64_array_key_value_iterator_map) \
+ V(BIGINT64_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ bigint64_array_key_value_iterator_map) \
\
V(FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
fast_smi_array_key_value_iterator_map) \
@@ -176,6 +166,10 @@ enum ContextLookupFlags {
float64_array_value_iterator_map) \
V(UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
uint8_clamped_array_value_iterator_map) \
+ V(BIGUINT64_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ biguint64_array_value_iterator_map) \
+ V(BIGINT64_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ bigint64_array_value_iterator_map) \
\
V(FAST_SMI_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
fast_smi_array_value_iterator_map) \
@@ -197,35 +191,20 @@ enum ContextLookupFlags {
V(ACCESSOR_PROPERTY_DESCRIPTOR_MAP_INDEX, Map, \
accessor_property_descriptor_map) \
V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
- V(ALLOW_WASM_EVAL_INDEX, Object, allow_wasm_eval) \
V(ARRAY_BUFFER_FUN_INDEX, JSFunction, array_buffer_fun) \
V(ARRAY_BUFFER_MAP_INDEX, Map, array_buffer_map) \
V(ARRAY_BUFFER_NOINIT_FUN_INDEX, JSFunction, array_buffer_noinit_fun) \
V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
V(ASYNC_FROM_SYNC_ITERATOR_MAP_INDEX, Map, async_from_sync_iterator_map) \
- V(ASYNC_FUNCTION_AWAIT_REJECT_SHARED_FUN, SharedFunctionInfo, \
- async_function_await_reject_shared_fun) \
- V(ASYNC_FUNCTION_AWAIT_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
- async_function_await_resolve_shared_fun) \
V(ASYNC_FUNCTION_FUNCTION_INDEX, JSFunction, async_function_constructor) \
V(ASYNC_GENERATOR_FUNCTION_FUNCTION_INDEX, JSFunction, \
async_generator_function_function) \
V(ASYNC_ITERATOR_VALUE_UNWRAP_SHARED_FUN, SharedFunctionInfo, \
async_iterator_value_unwrap_shared_fun) \
- V(ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN, SharedFunctionInfo, \
- async_generator_await_reject_shared_fun) \
- V(ASYNC_GENERATOR_AWAIT_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
- async_generator_await_resolve_shared_fun) \
- V(ASYNC_GENERATOR_YIELD_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
- async_generator_yield_resolve_shared_fun) \
- V(ASYNC_GENERATOR_RETURN_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
- async_generator_return_resolve_shared_fun) \
- V(ASYNC_GENERATOR_RETURN_CLOSED_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
- async_generator_return_closed_resolve_shared_fun) \
- V(ASYNC_GENERATOR_RETURN_CLOSED_REJECT_SHARED_FUN, SharedFunctionInfo, \
- async_generator_return_closed_reject_shared_fun) \
V(ATOMICS_OBJECT, JSObject, atomics_object) \
V(BIGINT_FUNCTION_INDEX, JSFunction, bigint_function) \
+ V(BIGINT64_ARRAY_FUN_INDEX, JSFunction, bigint64_array_fun) \
+ V(BIGUINT64_ARRAY_FUN_INDEX, JSFunction, biguint64_array_fun) \
V(BOOLEAN_FUNCTION_INDEX, JSFunction, boolean_function) \
V(BOUND_FUNCTION_WITH_CONSTRUCTOR_MAP_INDEX, Map, \
bound_function_with_constructor_map) \
@@ -261,13 +240,18 @@ enum ContextLookupFlags {
V(INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX, Map, \
initial_array_iterator_prototype_map) \
V(INITIAL_ARRAY_PROTOTYPE_INDEX, JSObject, initial_array_prototype) \
+ V(INITIAL_ARRAY_PROTOTYPE_MAP_INDEX, Map, initial_array_prototype_map) \
V(INITIAL_ERROR_PROTOTYPE_INDEX, JSObject, initial_error_prototype) \
V(INITIAL_GENERATOR_PROTOTYPE_INDEX, JSObject, initial_generator_prototype) \
V(INITIAL_ASYNC_GENERATOR_PROTOTYPE_INDEX, JSObject, \
initial_async_generator_prototype) \
V(INITIAL_ITERATOR_PROTOTYPE_INDEX, JSObject, initial_iterator_prototype) \
+ V(INITIAL_MAP_PROTOTYPE_MAP_INDEX, Map, initial_map_prototype_map) \
V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \
+ V(INITIAL_SET_PROTOTYPE_MAP_INDEX, Map, initial_set_prototype_map) \
V(INITIAL_STRING_PROTOTYPE_INDEX, JSObject, initial_string_prototype) \
+ V(INITIAL_WEAKMAP_PROTOTYPE_MAP_INDEX, Map, initial_weakmap_prototype_map) \
+ V(INITIAL_WEAKSET_PROTOTYPE_MAP_INDEX, Map, initial_weakset_prototype_map) \
V(INT16_ARRAY_FUN_INDEX, JSFunction, int16_array_fun) \
V(INT32_ARRAY_FUN_INDEX, JSFunction, int32_array_fun) \
V(INT8_ARRAY_FUN_INDEX, JSFunction, int8_array_fun) \
@@ -320,9 +304,10 @@ enum ContextLookupFlags {
V(PROXY_REVOKE_SHARED_FUN, SharedFunctionInfo, proxy_revoke_shared_fun) \
V(PROMISE_GET_CAPABILITIES_EXECUTOR_SHARED_FUN, SharedFunctionInfo, \
promise_get_capabilities_executor_shared_fun) \
- V(PROMISE_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
- promise_resolve_shared_fun) \
- V(PROMISE_REJECT_SHARED_FUN, SharedFunctionInfo, promise_reject_shared_fun) \
+ V(PROMISE_CAPABILITY_DEFAULT_REJECT_SHARED_FUN_INDEX, SharedFunctionInfo, \
+ promise_capability_default_reject_shared_fun) \
+ V(PROMISE_CAPABILITY_DEFAULT_RESOLVE_SHARED_FUN_INDEX, SharedFunctionInfo, \
+ promise_capability_default_resolve_shared_fun) \
V(PROMISE_THEN_FINALLY_SHARED_FUN, SharedFunctionInfo, \
promise_then_finally_shared_fun) \
V(PROMISE_CATCH_FINALLY_SHARED_FUN, SharedFunctionInfo, \
@@ -333,7 +318,7 @@ enum ContextLookupFlags {
promise_thrower_finally_shared_fun) \
V(PROMISE_ALL_RESOLVE_ELEMENT_SHARED_FUN, SharedFunctionInfo, \
promise_all_resolve_element_shared_fun) \
- V(PROMISE_PROTOTYPE_MAP_INDEX, Map, promise_prototype_map) \
+ V(PROMISE_PROTOTYPE_INDEX, JSObject, promise_prototype) \
V(REGEXP_EXEC_FUNCTION_INDEX, JSFunction, regexp_exec_function) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
V(REGEXP_LAST_MATCH_INFO_INDEX, RegExpMatchInfo, regexp_last_match_info) \
@@ -356,7 +341,7 @@ enum ContextLookupFlags {
slow_object_with_null_prototype_map) \
V(SLOW_OBJECT_WITH_OBJECT_PROTOTYPE_MAP, Map, \
slow_object_with_object_prototype_map) \
- V(SLOW_TEMPLATE_INSTANTIATIONS_CACHE_INDEX, NumberDictionary, \
+ V(SLOW_TEMPLATE_INSTANTIATIONS_CACHE_INDEX, SimpleNumberDictionary, \
slow_template_instantiations_cache) \
/* All *_FUNCTION_MAP_INDEX definitions used by Context::FunctionMapIndex */ \
/* must remain together. */ \
@@ -406,7 +391,6 @@ enum ContextLookupFlags {
V(WASM_MEMORY_CONSTRUCTOR_INDEX, JSFunction, wasm_memory_constructor) \
V(WASM_MODULE_CONSTRUCTOR_INDEX, JSFunction, wasm_module_constructor) \
V(WASM_TABLE_CONSTRUCTOR_INDEX, JSFunction, wasm_table_constructor) \
- V(TEMPLATE_MAP_INDEX, HeapObject, template_map) \
V(TYPED_ARRAY_FUN_INDEX, JSFunction, typed_array_function) \
V(TYPED_ARRAY_PROTOTYPE_INDEX, JSObject, typed_array_prototype) \
V(UINT16_ARRAY_FUN_INDEX, JSFunction, uint16_array_fun) \
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h
index 21f90a50ae..b64f016df2 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/conversions-inl.h
@@ -72,8 +72,10 @@ inline double DoubleToInteger(double x) {
int32_t DoubleToInt32(double x) {
- int32_t i = FastD2I(x);
- if (FastI2D(i) == x) return i;
+ if ((std::isfinite(x)) && (x <= INT_MAX) && (x >= INT_MIN)) {
+ int32_t i = static_cast<int32_t>(x);
+ if (FastI2D(i) == x) return i;
+ }
Double d(x);
int exponent = d.Exponent();
if (exponent < 0) {
@@ -94,14 +96,15 @@ bool DoubleToSmiInteger(double value, int* smi_int_value) {
}
bool IsSmiDouble(double value) {
- return !IsMinusZero(value) && value >= Smi::kMinValue &&
- value <= Smi::kMaxValue && value == FastI2D(FastD2I(value));
+ return std::isfinite(value) && !IsMinusZero(value) &&
+ value >= Smi::kMinValue && value <= Smi::kMaxValue &&
+ value == FastI2D(FastD2I(value));
}
bool IsInt32Double(double value) {
- return !IsMinusZero(value) && value >= kMinInt && value <= kMaxInt &&
- value == FastI2D(FastD2I(value));
+ return std::isfinite(value) && !IsMinusZero(value) && value >= kMinInt &&
+ value <= kMaxInt && value == FastI2D(FastD2I(value));
}
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index c5ea1b8366..827ccbd773 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -175,8 +175,8 @@ double InternalStringToIntDouble(UnicodeCache* unicode_cache, Iterator current,
}
// ES6 18.2.5 parseInt(string, radix) (with NumberParseIntHelper subclass);
-// https://tc39.github.io/proposal-bigint/#sec-bigint-parseint-string-radix
-// (with BigIntParseIntHelper subclass).
+// and BigInt parsing cases from https://tc39.github.io/proposal-bigint/
+// (with StringToBigIntHelper subclass).
class StringToIntHelper {
public:
StringToIntHelper(Isolate* isolate, Handle<String> subject, int radix)
@@ -852,17 +852,12 @@ double StringToInt(Isolate* isolate, Handle<String> string, int radix) {
return helper.GetResult();
}
-class BigIntParseIntHelper : public StringToIntHelper {
+class StringToBigIntHelper : public StringToIntHelper {
public:
- enum class Behavior { kParseInt, kStringToBigInt, kLiteral };
-
- // Used for BigInt.parseInt API, where the input is a Heap-allocated String.
- BigIntParseIntHelper(Isolate* isolate, Handle<String> string, int radix)
- : StringToIntHelper(isolate, string, radix),
- behavior_(Behavior::kParseInt) {}
+ enum class Behavior { kStringToBigInt, kLiteral };
// Used for StringToBigInt operation (BigInt constructor and == operator).
- BigIntParseIntHelper(Isolate* isolate, Handle<String> string)
+ StringToBigIntHelper(Isolate* isolate, Handle<String> string)
: StringToIntHelper(isolate, string),
behavior_(Behavior::kStringToBigInt) {
set_allow_binary_and_octal_prefixes();
@@ -871,7 +866,7 @@ class BigIntParseIntHelper : public StringToIntHelper {
// Used for parsing BigInt literals, where the input is a buffer of
// one-byte ASCII digits, along with an optional radix prefix.
- BigIntParseIntHelper(Isolate* isolate, const uint8_t* string, int length)
+ StringToBigIntHelper(Isolate* isolate, const uint8_t* string, int length)
: StringToIntHelper(isolate, string, length),
behavior_(Behavior::kLiteral) {
set_allow_binary_and_octal_prefixes();
@@ -884,9 +879,7 @@ class BigIntParseIntHelper : public StringToIntHelper {
return MaybeHandle<BigInt>();
}
if (state() == kEmpty) {
- if (behavior_ == Behavior::kParseInt) {
- set_state(kJunk);
- } else if (behavior_ == Behavior::kStringToBigInt) {
+ if (behavior_ == Behavior::kStringToBigInt) {
set_state(kZero);
} else {
UNREACHABLE();
@@ -924,9 +917,12 @@ class BigIntParseIntHelper : public StringToIntHelper {
      // Optimization opportunity: Would it make sense to scan for trailing
// junk before allocating the result?
int charcount = length() - cursor();
- // TODO(adamk): Pretenure if this is for a literal.
- MaybeHandle<FreshlyAllocatedBigInt> maybe =
- BigInt::AllocateFor(isolate(), radix(), charcount, should_throw());
+ // For literals, we pretenure the allocated BigInt, since it's about
+ // to be stored in the interpreter's constants array.
+ PretenureFlag pretenure =
+ behavior_ == Behavior::kLiteral ? TENURED : NOT_TENURED;
+ MaybeHandle<FreshlyAllocatedBigInt> maybe = BigInt::AllocateFor(
+ isolate(), radix(), charcount, should_throw(), pretenure);
if (!maybe.ToHandle(&result_)) {
set_state(kError);
}
@@ -938,28 +934,20 @@ class BigIntParseIntHelper : public StringToIntHelper {
}
private:
- ShouldThrow should_throw() const {
- return behavior_ == Behavior::kParseInt ? kThrowOnError : kDontThrow;
- }
+ ShouldThrow should_throw() const { return kDontThrow; }
Handle<FreshlyAllocatedBigInt> result_;
Behavior behavior_;
};
-MaybeHandle<BigInt> BigIntParseInt(Isolate* isolate, Handle<String> string,
- int radix) {
- BigIntParseIntHelper helper(isolate, string, radix);
- return helper.GetResult();
-}
-
MaybeHandle<BigInt> StringToBigInt(Isolate* isolate, Handle<String> string) {
string = String::Flatten(string);
- BigIntParseIntHelper helper(isolate, string);
+ StringToBigIntHelper helper(isolate, string);
return helper.GetResult();
}
MaybeHandle<BigInt> BigIntLiteral(Isolate* isolate, const char* string) {
- BigIntParseIntHelper helper(isolate, reinterpret_cast<const uint8_t*>(string),
+ StringToBigIntHelper helper(isolate, reinterpret_cast<const uint8_t*>(string),
static_cast<int>(strlen(string)));
return helper.GetResult();
}
diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h
index 915a286e8f..6189fe0aa1 100644
--- a/deps/v8/src/conversions.h
+++ b/deps/v8/src/conversions.h
@@ -105,9 +105,6 @@ double StringToDouble(UnicodeCache* unicode_cache,
double StringToInt(Isolate* isolate, Handle<String> string, int radix);
-// This follows BigInt.parseInt semantics: "" => SyntaxError.
-MaybeHandle<BigInt> BigIntParseInt(Isolate* isolate, Handle<String> string,
- int radix);
// This follows https://tc39.github.io/proposal-bigint/#sec-string-to-bigint
// semantics: "" => 0n.
MaybeHandle<BigInt> StringToBigInt(Isolate* isolate, Handle<String> string);
diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc
index e41fa276a8..001beb938e 100644
--- a/deps/v8/src/counters.cc
+++ b/deps/v8/src/counters.cc
@@ -84,7 +84,6 @@ void TimedHistogram::Start(base::ElapsedTimer* timer, Isolate* isolate) {
void TimedHistogram::Stop(base::ElapsedTimer* timer, Isolate* isolate) {
if (Enabled()) {
- // Compute the delta between start and stop, in microseconds.
int64_t sample = resolution_ == HistogramTimerResolution::MICROSECOND
? timer->Elapsed().InMicroseconds()
: timer->Elapsed().InMilliseconds();
@@ -96,6 +95,21 @@ void TimedHistogram::Stop(base::ElapsedTimer* timer, Isolate* isolate) {
}
}
+void TimedHistogram::RecordAbandon(base::ElapsedTimer* timer,
+ Isolate* isolate) {
+ if (Enabled()) {
+ DCHECK(timer->IsStarted());
+ timer->Stop();
+ int64_t sample = resolution_ == HistogramTimerResolution::MICROSECOND
+ ? base::TimeDelta::Max().InMicroseconds()
+ : base::TimeDelta::Max().InMilliseconds();
+ AddSample(static_cast<int>(sample));
+ }
+ if (isolate != nullptr) {
+ Logger::CallEventLogger(isolate, name(), Logger::END, true);
+ }
+}
+
Counters::Counters(Isolate* isolate)
: isolate_(isolate),
stats_table_(this),
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index b3c6f8c8ff..e06cb8b66d 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -220,6 +220,12 @@ class Histogram {
int max() const { return max_; }
int num_buckets() const { return num_buckets_; }
+ // Asserts that |expected_counters| are the same as the Counters this
+ // Histogram reports to.
+ void AssertReportsToCounters(Counters* expected_counters) {
+ DCHECK_EQ(counters_, expected_counters);
+ }
+
protected:
Histogram() {}
Histogram(const char* name, int min, int max, int num_buckets,
@@ -229,7 +235,9 @@ class Histogram {
max_(max),
num_buckets_(num_buckets),
histogram_(nullptr),
- counters_(counters) {}
+ counters_(counters) {
+ DCHECK(counters_);
+ }
Counters* counters() const { return counters_; }
@@ -261,6 +269,10 @@ class TimedHistogram : public Histogram {
// Stop the timer and record the results. Log if isolate non-null.
void Stop(base::ElapsedTimer* timer, Isolate* isolate);
+  // Records a TimeDelta::Max() result. Useful to record the percentage of
+  // tasks that never got to run in a given scenario. Log if isolate non-null.
+ void RecordAbandon(base::ElapsedTimer* timer, Isolate* isolate);
+
protected:
friend class Counters;
HistogramTimerResolution resolution_;
@@ -282,6 +294,7 @@ class TimedHistogramScope {
: histogram_(histogram), isolate_(isolate) {
histogram_->Start(&timer_, isolate);
}
+
~TimedHistogramScope() { histogram_->Stop(&timer_, isolate_); }
private:
@@ -292,6 +305,42 @@ class TimedHistogramScope {
DISALLOW_IMPLICIT_CONSTRUCTORS(TimedHistogramScope);
};
+// Helper class for recording a TimedHistogram asynchronously with manual
+// controls (it will not generate a report if destroyed without explicitly
+// triggering a report). |async_counters| should be a shared_ptr to
+// |histogram->counters()|, making it safe to report to an
+// AsyncTimedHistogram after the associated isolate has been destroyed.
+// AsyncTimedHistogram can be moved/copied to avoid computing Now() multiple
+// times when the times of multiple tasks are identical; each copy will generate
+// its own report.
+class AsyncTimedHistogram {
+ public:
+ explicit AsyncTimedHistogram(TimedHistogram* histogram,
+ std::shared_ptr<Counters> async_counters)
+ : histogram_(histogram), async_counters_(std::move(async_counters)) {
+ histogram_->AssertReportsToCounters(async_counters_.get());
+ histogram_->Start(&timer_, nullptr);
+ }
+
+ ~AsyncTimedHistogram() = default;
+
+ AsyncTimedHistogram(const AsyncTimedHistogram& other) = default;
+ AsyncTimedHistogram& operator=(const AsyncTimedHistogram& other) = default;
+ AsyncTimedHistogram(AsyncTimedHistogram&& other) = default;
+ AsyncTimedHistogram& operator=(AsyncTimedHistogram&& other) = default;
+
+ // Records the time elapsed to |histogram_| and stops |timer_|.
+ void RecordDone() { histogram_->Stop(&timer_, nullptr); }
+
+ // Records TimeDelta::Max() to |histogram_| and stops |timer_|.
+ void RecordAbandon() { histogram_->RecordAbandon(&timer_, nullptr); }
+
+ private:
+ base::ElapsedTimer timer_;
+ TimedHistogram* histogram_;
+ std::shared_ptr<Counters> async_counters_;
+};
+
// Helper class for scoping a TimedHistogram, where the histogram is selected at
// stop time rather than start time.
// TODO(leszeks): This is heavily reliant on TimedHistogram::Start() doing
@@ -643,6 +692,8 @@ class RuntimeCallTimer final {
V(ArrayBuffer_New) \
V(Array_CloneElementAt) \
V(Array_New) \
+ V(BigInt64Array_New) \
+ V(BigUint64Array_New) \
V(BooleanObject_BooleanValue) \
V(BooleanObject_New) \
V(Context_New) \
@@ -795,7 +846,6 @@ class RuntimeCallTimer final {
V(CompileBackgroundEval) \
V(CompileBackgroundIgnition) \
V(CompileBackgroundScript) \
- V(CompileBackgroundRenumber) \
V(CompileBackgroundRewriteReturnResult) \
V(CompileBackgroundScopeAnalysis) \
V(CompileDeserialize) \
@@ -805,7 +855,6 @@ class RuntimeCallTimer final {
V(CompileGetFromOptimizedCodeMap) \
V(CompileIgnition) \
V(CompileIgnitionFinalization) \
- V(CompileRenumber) \
V(CompileRewriteReturnResult) \
V(CompileScopeAnalysis) \
V(CompileScript) \
@@ -1170,7 +1219,9 @@ class RuntimeCallTimerScope {
HT(compile_script_no_cache_because_cache_too_cold, \
V8.CompileScriptMicroSeconds.NoCache.CacheTooCold, 1000000, MICROSECOND) \
HT(compile_script_on_background, \
- V8.CompileScriptMicroSeconds.BackgroundThread, 1000000, MICROSECOND)
+ V8.CompileScriptMicroSeconds.BackgroundThread, 1000000, MICROSECOND) \
+ HT(gc_parallel_task_latency, V8.GC.ParallelTaskLatencyMicroSeconds, 1000000, \
+ MICROSECOND)
#define AGGREGATABLE_HISTOGRAM_TIMER_LIST(AHT) \
AHT(compile_lazy, V8.CompileLazyMicroSeconds)
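
Editor's note: the new AsyncTimedHistogram above is meant to be carried by background tasks, which is why it holds a shared_ptr to the Counters and exposes an explicit RecordDone()/RecordAbandon() pair instead of reporting from its destructor. A hedged usage sketch follows; the task class and the way the histogram is obtained are illustrative, only the AsyncTimedHistogram API comes from the header above.

// Sketch only: assumes "src/counters.h" is available and that the caller owns
// a shared_ptr<Counters> matching the histogram (as AssertReportsToCounters
// checks).
#include <memory>
#include <utility>
#include "src/counters.h"

namespace v8 {
namespace internal {

class HypotheticalGCTask {
 public:
  HypotheticalGCTask(TimedHistogram* latency,
                     std::shared_ptr<Counters> counters)
      : latency_(latency, std::move(counters)) {}  // timer starts here

  void Run() {
    // ... do the parallel work ...
    latency_.RecordDone();  // reports the real elapsed time
  }

  void Abandon() {
    // Task never got to run: report TimeDelta::Max() so abandoned tasks show
    // up in the top bucket of the latency histogram.
    latency_.RecordAbandon();
  }

 private:
  AsyncTimedHistogram latency_;
};

}  // namespace internal
}  // namespace v8
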
diff --git a/deps/v8/src/d8-posix.cc b/deps/v8/src/d8-posix.cc
index 3aae30799f..c16b963776 100644
--- a/deps/v8/src/d8-posix.cc
+++ b/deps/v8/src/d8-posix.cc
@@ -361,8 +361,8 @@ static Local<Value> GetStdout(Isolate* isolate, int child_fd,
// We're disabling usage of waitid in Mac OS X because it doesn't work for us:
// a parent process hangs on waiting while a child process is already a zombie.
// See http://code.google.com/p/v8/issues/detail?id=401.
-#if defined(WNOWAIT) && !defined(ANDROID) && !defined(__APPLE__) \
- && !defined(__NetBSD__)
+#if defined(WNOWAIT) && !defined(ANDROID) && !defined(__APPLE__) && \
+ !defined(__NetBSD__) && !defined(__Fuchsia__)
#if !defined(__FreeBSD__)
#define HAS_WAITID 1
#endif
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 32f129821a..0f2ba4257e 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -84,15 +84,18 @@ class ArrayBufferAllocatorBase : public v8::ArrayBuffer::Allocator {
allocator_->Free(data, length);
}
- void* Reserve(size_t length) override { return allocator_->Reserve(length); }
+ void* Reserve(size_t length) override {
+ UNIMPLEMENTED();
+ return nullptr;
+ }
void Free(void* data, size_t length, AllocationMode mode) override {
- allocator_->Free(data, length, mode);
+ UNIMPLEMENTED();
}
void SetProtection(void* data, size_t length,
Protection protection) override {
- allocator_->SetProtection(data, length, protection);
+ UNIMPLEMENTED();
}
private:
@@ -121,18 +124,6 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
}
}
- void* Reserve(size_t length) override {
- // |length| must be over the threshold so we can distinguish VM from
- // malloced memory.
- DCHECK_LE(kVMThreshold, length);
- return ArrayBufferAllocatorBase::Reserve(length);
- }
-
- void Free(void* data, size_t length, AllocationMode) override {
- // Ignore allocation mode; the appropriate action is determined by |length|.
- Free(data, length);
- }
-
private:
static constexpr size_t kVMThreshold = 65536;
static constexpr size_t kTwoGB = 2u * 1024u * 1024u * 1024u;
@@ -172,14 +163,6 @@ class MockArrayBufferAllocator : public ArrayBufferAllocatorBase {
return ArrayBufferAllocatorBase::Free(data, Adjust(length));
}
- void* Reserve(size_t length) override {
- return ArrayBufferAllocatorBase::Reserve(Adjust(length));
- }
-
- void Free(void* data, size_t length, AllocationMode mode) override {
- return ArrayBufferAllocatorBase::Free(data, Adjust(length), mode);
- }
-
private:
size_t Adjust(size_t length) {
const size_t kAllocationLimit = 10 * kMB;
@@ -621,8 +604,9 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
Local<Context> context(isolate->GetCurrentContext());
ScriptOrigin origin(name);
- if (options.compile_options == ScriptCompiler::kConsumeCodeCache ||
- options.compile_options == ScriptCompiler::kConsumeParserCache) {
+ DCHECK(options.compile_options != ScriptCompiler::kProduceParserCache);
+ DCHECK(options.compile_options != ScriptCompiler::kConsumeParserCache);
+ if (options.compile_options == ScriptCompiler::kConsumeCodeCache) {
ScriptCompiler::CachedData* cached_code =
LookupCodeCache(isolate, source);
if (cached_code != nullptr) {
@@ -656,9 +640,6 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
ScriptCompiler::Source script_source(source, origin);
maybe_script = ScriptCompiler::Compile(context, &script_source,
options.compile_options);
- if (options.compile_options == ScriptCompiler::kProduceParserCache) {
- StoreInCodeCache(isolate, source, script_source.GetCachedData());
- }
}
Local<Script> script;
@@ -957,9 +938,7 @@ void Shell::DoHostImportModuleDynamically(void* import_data) {
std::string source_url = ToSTLString(isolate, referrer);
std::string dir_name =
- DirName(IsAbsolutePath(source_url)
- ? source_url
- : NormalizePath(source_url, GetWorkingDirectory()));
+ DirName(NormalizePath(source_url, GetWorkingDirectory()));
std::string file_name = ToSTLString(isolate, specifier);
std::string absolute_path = NormalizePath(file_name, dir_name);
@@ -2576,7 +2555,11 @@ void SourceGroup::JoinThread() {
ExternalizedContents::~ExternalizedContents() {
if (base_ != nullptr) {
- Shell::array_buffer_allocator->Free(base_, length_, mode_);
+ if (mode_ == ArrayBuffer::Allocator::AllocationMode::kReservation) {
+ CHECK(i::FreePages(base_, length_));
+ } else {
+ Shell::array_buffer_allocator->Free(base_, length_);
+ }
}
}
@@ -2852,8 +2835,6 @@ bool Shell::SetOptions(int argc, char* argv[]) {
options.compile_options = v8::ScriptCompiler::kNoCompileOptions;
options.code_cache_options =
ShellOptions::CodeCacheOptions::kProduceCache;
- } else if (strncmp(value, "=parse", 7) == 0) {
- options.compile_options = v8::ScriptCompiler::kProduceParserCache;
} else if (strncmp(value, "=none", 6) == 0) {
options.compile_options = v8::ScriptCompiler::kNoCompileOptions;
options.code_cache_options =
@@ -2900,6 +2881,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strcmp(argv[i], "--quiet-load") == 0) {
options.quiet_load = true;
argv[i] = nullptr;
+ } else if (strncmp(argv[i], "--thread-pool-size=", 19) == 0) {
+ options.thread_pool_size = atoi(argv[i] + 19);
+ argv[i] = nullptr;
}
}
@@ -3085,6 +3069,13 @@ class Serializer : public ValueSerializer::Delegate {
std::unique_ptr<SerializationData> Release() { return std::move(data_); }
+ void AppendExternalizedContentsTo(std::vector<ExternalizedContents>* to) {
+ to->insert(to->end(),
+ std::make_move_iterator(externalized_contents_.begin()),
+ std::make_move_iterator(externalized_contents_.end()));
+ externalized_contents_.clear();
+ }
+
protected:
// Implements ValueSerializer::Delegate.
void ThrowDataCloneError(Local<String> message) override {
@@ -3102,6 +3093,8 @@ class Serializer : public ValueSerializer::Delegate {
size_t index = shared_array_buffers_.size();
shared_array_buffers_.emplace_back(isolate_, shared_array_buffer);
+ data_->shared_array_buffer_contents_.push_back(
+ MaybeExternalize(shared_array_buffer));
return Just<uint32_t>(static_cast<uint32_t>(index));
}
@@ -3155,7 +3148,7 @@ class Serializer : public ValueSerializer::Delegate {
return array_buffer->GetContents();
} else {
typename T::Contents contents = array_buffer->Externalize();
- data_->externalized_contents_.emplace_back(contents);
+ externalized_contents_.emplace_back(contents);
return contents;
}
}
@@ -3174,13 +3167,6 @@ class Serializer : public ValueSerializer::Delegate {
data_->array_buffer_contents_.push_back(contents);
}
- for (const auto& global_shared_array_buffer : shared_array_buffers_) {
- Local<SharedArrayBuffer> shared_array_buffer =
- Local<SharedArrayBuffer>::New(isolate_, global_shared_array_buffer);
- data_->shared_array_buffer_contents_.push_back(
- MaybeExternalize(shared_array_buffer));
- }
-
return Just(true);
}
@@ -3189,6 +3175,7 @@ class Serializer : public ValueSerializer::Delegate {
std::unique_ptr<SerializationData> data_;
std::vector<Global<ArrayBuffer>> array_buffers_;
std::vector<Global<SharedArrayBuffer>> shared_array_buffers_;
+ std::vector<ExternalizedContents> externalized_contents_;
size_t current_memory_usage_;
DISALLOW_COPY_AND_ASSIGN(Serializer);
@@ -3216,16 +3203,21 @@ class Deserializer : public ValueDeserializer::Delegate {
deserializer_.TransferArrayBuffer(index++, array_buffer);
}
- index = 0;
- for (const auto& contents : data_->shared_array_buffer_contents()) {
- Local<SharedArrayBuffer> shared_array_buffer = SharedArrayBuffer::New(
- isolate_, contents.Data(), contents.ByteLength());
- deserializer_.TransferSharedArrayBuffer(index++, shared_array_buffer);
- }
-
return deserializer_.ReadValue(context);
}
+ MaybeLocal<SharedArrayBuffer> GetSharedArrayBufferFromId(
+ Isolate* isolate, uint32_t clone_id) override {
+ DCHECK_NOT_NULL(data_);
+ if (clone_id < data_->shared_array_buffer_contents().size()) {
+ SharedArrayBuffer::Contents contents =
+ data_->shared_array_buffer_contents().at(clone_id);
+ return SharedArrayBuffer::New(isolate_, contents.Data(),
+ contents.ByteLength());
+ }
+ return MaybeLocal<SharedArrayBuffer>();
+ }
+
private:
Isolate* isolate_;
ValueDeserializer deserializer_;
@@ -3242,9 +3234,11 @@ std::unique_ptr<SerializationData> Shell::SerializeValue(
if (serializer.WriteValue(context, value, transfer).To(&ok)) {
std::unique_ptr<SerializationData> data = serializer.Release();
base::LockGuard<base::Mutex> lock_guard(workers_mutex_.Pointer());
- data->AppendExternalizedContentsTo(&externalized_contents_);
+ serializer.AppendExternalizedContentsTo(&externalized_contents_);
return data;
}
+ // Append externalized contents even when WriteValue fails.
+ serializer.AppendExternalizedContentsTo(&externalized_contents_);
return nullptr;
}
@@ -3318,8 +3312,8 @@ int Shell::Main(int argc, char* argv[]) {
platform::tracing::TracingController* tracing_controller = tracing.get();
g_platform = v8::platform::NewDefaultPlatform(
- 0, v8::platform::IdleTaskSupport::kEnabled, in_process_stack_dumping,
- std::move(tracing));
+ options.thread_pool_size, v8::platform::IdleTaskSupport::kEnabled,
+ in_process_stack_dumping, std::move(tracing));
if (i::FLAG_verify_predictable) {
g_platform.reset(new PredictablePlatform(std::move(g_platform)));
}
@@ -3422,14 +3416,9 @@ int Shell::Main(int argc, char* argv[]) {
result = RunMain(isolate, argc, argv, false);
// Change the options to consume cache
- if (options.compile_options == v8::ScriptCompiler::kProduceParserCache) {
- options.compile_options = v8::ScriptCompiler::kConsumeParserCache;
- } else {
- DCHECK(options.compile_options == v8::ScriptCompiler::kEagerCompile ||
- options.compile_options ==
- v8::ScriptCompiler::kNoCompileOptions);
- options.compile_options = v8::ScriptCompiler::kConsumeCodeCache;
- }
+ DCHECK(options.compile_options == v8::ScriptCompiler::kEagerCompile ||
+ options.compile_options == v8::ScriptCompiler::kNoCompileOptions);
+ options.compile_options = v8::ScriptCompiler::kConsumeCodeCache;
printf("============ Run: Consume code cache ============\n");
// Second run to consume the cache in new isolate
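
Editor's note: the Deserializer change above replaces eager TransferSharedArrayBuffer registration with the v8::ValueDeserializer::Delegate::GetSharedArrayBufferFromId hook, so shared buffers are materialized lazily by clone id. A hedged, minimal delegate along the same lines is sketched below; the class name and container are illustrative, only the override signature mirrors the API used in the hunk.

#include <utility>
#include <vector>
#include "include/v8.h"

// Minimal sketch of a deserializer delegate that resolves SharedArrayBuffers
// by clone id, in the spirit of d8's new Deserializer::GetSharedArrayBufferFromId.
class SabByIdDelegate : public v8::ValueDeserializer::Delegate {
 public:
  explicit SabByIdDelegate(
      std::vector<v8::SharedArrayBuffer::Contents> contents)
      : contents_(std::move(contents)) {}

  v8::MaybeLocal<v8::SharedArrayBuffer> GetSharedArrayBufferFromId(
      v8::Isolate* isolate, uint32_t clone_id) override {
    if (clone_id < contents_.size()) {
      const auto& c = contents_[clone_id];
      // Re-wrap the externalized backing store for this isolate.
      return v8::SharedArrayBuffer::New(isolate, c.Data(), c.ByteLength());
    }
    return v8::MaybeLocal<v8::SharedArrayBuffer>();  // unknown id
  }

 private:
  std::vector<v8::SharedArrayBuffer::Contents> contents_;
};
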
diff --git a/deps/v8/src/d8.gyp b/deps/v8/src/d8.gyp
deleted file mode 100644
index e6a40212cd..0000000000
--- a/deps/v8/src/d8.gyp
+++ /dev/null
@@ -1,161 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-{
- 'variables': {
- 'v8_code': 1,
- # Enable support for Intel VTune. Supported on ia32/x64 only
- 'v8_enable_vtunejit%': 0,
- 'v8_enable_i18n_support%': 1,
- },
- 'includes': ['../gypfiles/toolchain.gypi', '../gypfiles/features.gypi'],
- 'targets': [
- {
- 'target_name': 'd8',
- 'type': 'executable',
- 'dependencies': [
- 'v8.gyp:v8',
- 'v8.gyp:v8_libbase',
- 'v8.gyp:v8_libplatform',
- ],
- # Generated source files need this explicitly:
- 'include_dirs+': [
- '..',
- '<(DEPTH)',
- ],
- 'sources': [
- 'd8.h',
- 'd8.cc',
- 'd8-console.h',
- 'd8-console.cc',
- '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc',
- ],
- 'conditions': [
- [ 'want_separate_host_toolset==1', {
- 'toolsets': [ 'target', ],
- 'dependencies': [
- 'd8_js2c#host',
- ],
- }, {
- 'dependencies': [
- 'd8_js2c',
- ],
- }],
- ['(OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="netbsd" \
- or OS=="openbsd" or OS=="solaris" or OS=="android" \
- or OS=="qnx" or OS=="aix")', {
- 'sources': [ 'd8-posix.cc', ]
- }],
- [ 'OS=="win"', {
- 'sources': [ 'd8-windows.cc', ]
- }],
- [ 'component!="shared_library"', {
- 'conditions': [
- [ 'v8_postmortem_support=="true"', {
- 'xcode_settings': {
- 'OTHER_LDFLAGS': [
- '-Wl,-force_load,<(PRODUCT_DIR)/libv8_base.a'
- ],
- },
- }],
- ],
- }],
- ['v8_enable_vtunejit==1', {
- 'dependencies': [
- '../src/third_party/vtune/v8vtune.gyp:v8_vtune',
- ],
- }],
- ['v8_enable_i18n_support==1', {
- 'dependencies': [
- '<(icu_gyp_path):icui18n',
- '<(icu_gyp_path):icuuc',
- ],
- }],
- ['OS=="win" and v8_enable_i18n_support==1', {
- 'dependencies': [
- '<(icu_gyp_path):icudata',
- ],
- }],
- ],
- },
- {
- 'target_name': 'd8_js2c',
- 'type': 'none',
- 'variables': {
- 'js_files': [
- 'd8.js',
- 'js/macros.py',
- ],
- },
- 'conditions': [
- [ 'want_separate_host_toolset==1', {
- 'toolsets': ['host'],
- }, {
- 'toolsets': ['target'],
- }]
- ],
- 'actions': [
- {
- 'action_name': 'd8_js2c',
- 'inputs': [
- '../tools/js2c.py',
- '<@(js_files)',
- ],
- 'outputs': [
- '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc',
- ],
- 'action': [
- 'python',
- '../tools/js2c.py',
- '<@(_outputs)',
- 'D8',
- '<@(js_files)'
- ],
- },
- ],
- },
- ],
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'd8_run',
- 'type': 'none',
- 'dependencies': [
- 'd8',
- ],
- 'includes': [
- '../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'd8.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index 8fc6eab046..bf4793ef04 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -197,12 +197,6 @@ class SerializationData {
return shared_array_buffer_contents_;
}
- void AppendExternalizedContentsTo(std::vector<ExternalizedContents>* to) {
- to->insert(to->end(),
- std::make_move_iterator(externalized_contents_.begin()),
- std::make_move_iterator(externalized_contents_.end()));
- externalized_contents_.clear();
- }
private:
struct DataDeleter {
@@ -213,7 +207,6 @@ class SerializationData {
size_t size_;
std::vector<ArrayBuffer::Contents> array_buffer_contents_;
std::vector<SharedArrayBuffer::Contents> shared_array_buffer_contents_;
- std::vector<ExternalizedContents> externalized_contents_;
private:
friend class Serializer;
@@ -358,6 +351,7 @@ class ShellOptions {
int read_from_tcp_port;
bool enable_os_system = false;
bool quiet_load = false;
+ int thread_pool_size = 0;
};
class Shell : public i::AllStatic {
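
Editor's note: the new ShellOptions::thread_pool_size field above feeds directly into platform construction in the d8.cc hunk earlier in this diff; zero keeps the platform's default sizing. A hedged standalone sketch of the same flag-to-platform flow follows; the parsing helper names are illustrative, while NewDefaultPlatform and IdleTaskSupport are the public libplatform API.

#include <cstdlib>
#include <cstring>
#include <memory>
#include "include/libplatform/libplatform.h"
#include "include/v8.h"

// Parse --thread-pool-size=N the same way the d8.cc hunk does; 0 means
// "let the platform choose".
static int ParseThreadPoolSize(int argc, char* argv[]) {
  int thread_pool_size = 0;
  for (int i = 1; i < argc; ++i) {
    if (strncmp(argv[i], "--thread-pool-size=", 19) == 0) {
      thread_pool_size = atoi(argv[i] + 19);
    }
  }
  return thread_pool_size;
}

std::unique_ptr<v8::Platform> MakePlatform(int argc, char* argv[]) {
  return v8::platform::NewDefaultPlatform(
      ParseThreadPoolSize(argc, argv),
      v8::platform::IdleTaskSupport::kEnabled);
}
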
diff --git a/deps/v8/src/date.h b/deps/v8/src/date.h
index b8a9263d32..d9fa13dae5 100644
--- a/deps/v8/src/date.h
+++ b/deps/v8/src/date.h
@@ -5,8 +5,6 @@
#ifndef V8_DATE_H_
#define V8_DATE_H_
-#include "src/allocation.h"
-#include "src/base/platform/platform.h"
#include "src/base/timezone-cache.h"
#include "src/globals.h"
@@ -289,4 +287,4 @@ class DateCache {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_DATE_H_
diff --git a/deps/v8/src/debug/arm64/debug-arm64.cc b/deps/v8/src/debug/arm64/debug-arm64.cc
index 47280bfbc9..e2c3f4738d 100644
--- a/deps/v8/src/debug/arm64/debug-arm64.cc
+++ b/deps/v8/src/debug/arm64/debug-arm64.cc
@@ -35,10 +35,9 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
// - Leave the frame.
// - Restart the frame by calling the function.
__ Mov(fp, x1);
- __ AssertStackConsistency();
__ Ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Mov(masm->StackPointer(), Operand(fp));
+ __ Mov(sp, fp);
__ Pop(fp, lr); // Frame, Return address.
__ Ldr(x0, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index e5865e639c..6052149b81 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -21,29 +21,24 @@
namespace v8 {
namespace internal {
-static inline bool IsDebugContext(Isolate* isolate, Context* context) {
- return context->native_context() == *isolate->debug()->debug_context();
-}
-
MaybeHandle<Object> DebugEvaluate::Global(Isolate* isolate,
Handle<String> source) {
- // Handle the processing of break.
- DisableBreak disable_break_scope(isolate->debug());
-
- // Enter the top context from before the debugger was invoked.
- SaveContext save(isolate);
- SaveContext* top = &save;
- while (top != nullptr && IsDebugContext(isolate, *top->context())) {
- top = top->prev();
- }
- if (top != nullptr) isolate->set_context(*top->context());
-
- // Get the native context now set to the top context from before the
- // debugger was invoked.
Handle<Context> context = isolate->native_context();
- Handle<JSObject> receiver(context->global_proxy());
- Handle<SharedFunctionInfo> outer_info(context->closure()->shared(), isolate);
- return Evaluate(isolate, outer_info, context, receiver, source, false);
+ ScriptOriginOptions origin_options(false, true);
+ MaybeHandle<SharedFunctionInfo> maybe_function_info =
+ Compiler::GetSharedFunctionInfoForScript(
+ source, Compiler::ScriptDetails(isolate->factory()->empty_string()),
+ origin_options, nullptr, nullptr, ScriptCompiler::kNoCompileOptions,
+ ScriptCompiler::kNoCacheNoReason, NOT_NATIVES_CODE);
+
+ Handle<SharedFunctionInfo> shared_info;
+ if (!maybe_function_info.ToHandle(&shared_info)) return MaybeHandle<Object>();
+
+ Handle<JSFunction> fun =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(shared_info,
+ context);
+ return Execution::Call(isolate, fun,
+ Handle<JSObject>(context->global_proxy()), 0, nullptr);
}
MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
@@ -278,6 +273,7 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(ToString) \
V(ToLength) \
V(ToNumber) \
+ V(ToBigInt) \
V(NumberToStringSkipCache) \
/* Type checks */ \
V(IsJSReceiver) \
@@ -292,7 +288,6 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(IsJSWeakSet) \
V(IsRegExp) \
V(IsTypedArray) \
- V(ClassOf) \
/* Loads */ \
V(LoadLookupSlotForCall) \
/* Arrays */ \
@@ -302,6 +297,8 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(TrySliceSimpleNonFastElements) \
V(HasComplexElements) \
V(EstimateNumberOfElements) \
+ V(NewArray) \
+ V(TypedArrayGetBuffer) \
/* Errors */ \
V(ReThrow) \
V(ThrowReferenceError) \
@@ -309,13 +306,14 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(ThrowIteratorResultNotAnObject) \
V(NewTypeError) \
V(ThrowInvalidStringLength) \
+ V(ThrowCalledNonCallable) \
/* Strings */ \
V(StringIndexOf) \
V(StringIncludes) \
V(StringReplaceOneCharWithString) \
V(StringToNumber) \
V(StringTrim) \
- V(SubString) \
+ V(StringSubstring) \
V(RegExpInternalReplace) \
/* BigInts */ \
V(BigIntEqualToBigInt) \
@@ -325,9 +323,8 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(CreateArrayLiteral) \
V(CreateObjectLiteral) \
V(CreateRegExpLiteral) \
- /* Collections */ \
- V(GenericHash) \
/* Called from builtins */ \
+ V(ClassOf) \
V(StringAdd) \
V(StringParseFloat) \
V(StringParseInt) \
@@ -343,18 +340,19 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(AllocateSeqOneByteString) \
V(AllocateSeqTwoByteString) \
V(ObjectCreate) \
+ V(ObjectEntries) \
+ V(ObjectEntriesSkipFastPath) \
V(ObjectHasOwnProperty) \
+ V(ObjectValues) \
+ V(ObjectValuesSkipFastPath) \
V(ArrayIndexOf) \
V(ArrayIncludes_Slow) \
V(ArrayIsArray) \
V(ThrowTypeError) \
- V(ThrowCalledOnNullOrUndefined) \
- V(ThrowIncompatibleMethodReceiver) \
- V(ThrowInvalidHint) \
- V(ThrowNotDateError) \
V(ThrowRangeError) \
V(ToName) \
V(GetOwnPropertyDescriptor) \
+ V(HasProperty) \
V(StackGuard) \
/* Misc. */ \
V(Call) \
@@ -362,7 +360,12 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(NewObject) \
V(CompleteInobjectSlackTrackingForMap) \
V(HasInPrototypeChain) \
- V(StringMaxLength)
+ V(StringMaxLength) \
+ /* Test */ \
+ V(OptimizeOsr) \
+ V(OptimizeFunctionOnNextCall) \
+ V(UnblockConcurrentRecompilation) \
+ V(GetOptimizationStatus)
#define CASE(Name) \
case Runtime::k##Name: \
@@ -383,6 +386,42 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
#undef INTRINSIC_WHITELIST
}
+#ifdef DEBUG
+bool BuiltinToIntrinsicHasNoSideEffect(Builtins::Name builtin_id,
+ Runtime::FunctionId intrinsic_id) {
+ // First check the intrinsic whitelist.
+ if (IntrinsicHasNoSideEffect(intrinsic_id)) return true;
+
+// Whitelist intrinsics called from specific builtins.
+#define BUILTIN_INTRINSIC_WHITELIST(V, W) \
+ /* Arrays */ \
+ V(Builtins::kArrayFilter, W(CreateDataProperty)) \
+ V(Builtins::kArrayMap, W(CreateDataProperty)) \
+ V(Builtins::kArrayPrototypeSlice, W(CreateDataProperty) W(SetProperty)) \
+ /* TypedArrays */ \
+ V(Builtins::kTypedArrayPrototypeFilter, W(TypedArrayCopyElements)) \
+ V(Builtins::kTypedArrayPrototypeMap, W(SetProperty))
+
+#define CASE(Builtin, ...) \
+ case Builtin: \
+ return (__VA_ARGS__ false);
+
+#define MATCH(Intrinsic) \
+ intrinsic_id == Runtime::k##Intrinsic || \
+ intrinsic_id == Runtime::kInline##Intrinsic ||
+
+ switch (builtin_id) {
+ BUILTIN_INTRINSIC_WHITELIST(CASE, MATCH)
+ default:
+ return false;
+ }
+
+#undef MATCH
+#undef CASE
+#undef BUILTIN_INTRINSIC_WHITELIST
+}
+#endif // DEBUG
+
bool BytecodeHasNoSideEffect(interpreter::Bytecode bytecode) {
typedef interpreter::Bytecode Bytecode;
typedef interpreter::Bytecodes Bytecodes;
@@ -512,6 +551,7 @@ bool BuiltinHasNoSideEffect(Builtins::Name id) {
case Builtins::kObjectPrototypePropertyIsEnumerable:
case Builtins::kObjectPrototypeToString:
// Array builtins.
+ case Builtins::kArrayIsArray:
case Builtins::kArrayConstructor:
case Builtins::kArrayIndexOf:
case Builtins::kArrayPrototypeValues:
@@ -520,11 +560,60 @@ bool BuiltinHasNoSideEffect(Builtins::Name id) {
case Builtins::kArrayPrototypeFind:
case Builtins::kArrayPrototypeFindIndex:
case Builtins::kArrayPrototypeKeys:
+ case Builtins::kArrayPrototypeSlice:
case Builtins::kArrayForEach:
case Builtins::kArrayEvery:
case Builtins::kArraySome:
+ case Builtins::kArrayConcat:
+ case Builtins::kArraySlice:
+ case Builtins::kArrayFilter:
+ case Builtins::kArrayMap:
case Builtins::kArrayReduce:
case Builtins::kArrayReduceRight:
+ // TypedArray builtins.
+ case Builtins::kTypedArrayConstructor:
+ case Builtins::kTypedArrayPrototypeBuffer:
+ case Builtins::kTypedArrayPrototypeByteLength:
+ case Builtins::kTypedArrayPrototypeByteOffset:
+ case Builtins::kTypedArrayPrototypeLength:
+ case Builtins::kTypedArrayPrototypeEntries:
+ case Builtins::kTypedArrayPrototypeKeys:
+ case Builtins::kTypedArrayPrototypeValues:
+ case Builtins::kTypedArrayPrototypeFind:
+ case Builtins::kTypedArrayPrototypeFindIndex:
+ case Builtins::kTypedArrayPrototypeIncludes:
+ case Builtins::kTypedArrayPrototypeIndexOf:
+ case Builtins::kTypedArrayPrototypeLastIndexOf:
+ case Builtins::kTypedArrayPrototypeSlice:
+ case Builtins::kTypedArrayPrototypeSubArray:
+ case Builtins::kTypedArrayPrototypeEvery:
+ case Builtins::kTypedArrayPrototypeSome:
+ case Builtins::kTypedArrayPrototypeFilter:
+ case Builtins::kTypedArrayPrototypeMap:
+ case Builtins::kTypedArrayPrototypeReduce:
+ case Builtins::kTypedArrayPrototypeReduceRight:
+ case Builtins::kTypedArrayPrototypeForEach:
+ // ArrayBuffer builtins.
+ case Builtins::kArrayBufferConstructor:
+ case Builtins::kArrayBufferPrototypeGetByteLength:
+ case Builtins::kArrayBufferIsView:
+ case Builtins::kArrayBufferPrototypeSlice:
+ case Builtins::kReturnReceiver:
+ // DataView builtins.
+ case Builtins::kDataViewConstructor:
+ case Builtins::kDataViewPrototypeGetBuffer:
+ case Builtins::kDataViewPrototypeGetByteLength:
+ case Builtins::kDataViewPrototypeGetByteOffset:
+ case Builtins::kDataViewPrototypeGetInt8:
+ case Builtins::kDataViewPrototypeGetUint8:
+ case Builtins::kDataViewPrototypeGetInt16:
+ case Builtins::kDataViewPrototypeGetUint16:
+ case Builtins::kDataViewPrototypeGetInt32:
+ case Builtins::kDataViewPrototypeGetUint32:
+ case Builtins::kDataViewPrototypeGetFloat32:
+ case Builtins::kDataViewPrototypeGetFloat64:
+ case Builtins::kDataViewPrototypeGetBigInt64:
+ case Builtins::kDataViewPrototypeGetBigUint64:
      // Boolean builtins.
case Builtins::kBooleanConstructor:
case Builtins::kBooleanPrototypeToString:
@@ -562,11 +651,17 @@ bool BuiltinHasNoSideEffect(Builtins::Name id) {
case Builtins::kDatePrototypeValueOf:
// Map builtins.
case Builtins::kMapConstructor:
+ case Builtins::kMapPrototypeForEach:
case Builtins::kMapPrototypeGet:
+ case Builtins::kMapPrototypeHas:
case Builtins::kMapPrototypeEntries:
case Builtins::kMapPrototypeGetSize:
case Builtins::kMapPrototypeKeys:
case Builtins::kMapPrototypeValues:
+ // WeakMap builtins.
+ case Builtins::kWeakMapConstructor:
+ case Builtins::kWeakMapGet:
+ case Builtins::kWeakMapHas:
// Math builtins.
case Builtins::kMathAbs:
case Builtins::kMathAcos:
@@ -619,8 +714,13 @@ bool BuiltinHasNoSideEffect(Builtins::Name id) {
// Set builtins.
case Builtins::kSetConstructor:
case Builtins::kSetPrototypeEntries:
+ case Builtins::kSetPrototypeForEach:
case Builtins::kSetPrototypeGetSize:
+ case Builtins::kSetPrototypeHas:
case Builtins::kSetPrototypeValues:
+ // WeakSet builtins.
+ case Builtins::kWeakSetConstructor:
+ case Builtins::kWeakSetHas:
// String builtins. Strings are immutable.
case Builtins::kStringFromCharCode:
case Builtins::kStringFromCodePoint:
@@ -659,11 +759,11 @@ bool BuiltinHasNoSideEffect(Builtins::Name id) {
case Builtins::kStringPrototypeToUpperCase:
#endif
case Builtins::kStringPrototypeTrim:
- case Builtins::kStringPrototypeTrimLeft:
- case Builtins::kStringPrototypeTrimRight:
+ case Builtins::kStringPrototypeTrimEnd:
+ case Builtins::kStringPrototypeTrimStart:
case Builtins::kStringPrototypeValueOf:
case Builtins::kStringToNumber:
- case Builtins::kSubString:
+ case Builtins::kStringSubstring:
// Symbol builtins.
case Builtins::kSymbolConstructor:
case Builtins::kSymbolKeyFor:
@@ -759,11 +859,6 @@ bool DebugEvaluate::FunctionHasNoSideEffect(Handle<SharedFunctionInfo> info) {
DCHECK(Builtins::IsLazy(builtin_index));
DCHECK_EQ(Builtins::TFJ, Builtins::KindOf(builtin_index));
- if (FLAG_trace_lazy_deserialization) {
- PrintF("Lazy-deserializing builtin %s\n",
- Builtins::name(builtin_index));
- }
-
code = Snapshot::DeserializeBuiltin(isolate, builtin_index);
DCHECK_NE(Builtins::kDeserializeLazy, code->builtin_index());
}
@@ -775,7 +870,9 @@ bool DebugEvaluate::FunctionHasNoSideEffect(Handle<SharedFunctionInfo> info) {
Address address = rinfo->target_external_reference();
const Runtime::Function* function = Runtime::FunctionForEntry(address);
if (function == nullptr) continue;
- if (!IntrinsicHasNoSideEffect(function->function_id)) {
+ if (!BuiltinToIntrinsicHasNoSideEffect(
+ static_cast<Builtins::Name>(builtin_index),
+ function->function_id)) {
PrintF("Whitelisted builtin %s calls non-whitelisted intrinsic %s\n",
Builtins::name(builtin_index), function->name);
failed = true;
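
Editor's note: the CASE/MATCH macro pair above works because each W(Name) expands to "intrinsic_id == Runtime::kName || intrinsic_id == Runtime::kInlineName ||" and CASE terminates the resulting chain with false. Below is a hedged, self-contained miniature of the same preprocessor trick; the enum values and whitelist contents are placeholders, not V8 identifiers.

#include <cstdio>

enum Intrinsic { kCreateDataProperty, kSetProperty, kStringAdd };

// Each W(Name) appends "id == Name ||"; CASE closes the chain with "false",
// so an entry with an empty intrinsic list would simply return false.
#define BUILTIN_INTRINSIC_WHITELIST(V, W) \
  V(/* builtin id */ 0, W(kCreateDataProperty) W(kSetProperty))

#define CASE(Builtin, ...) \
  case Builtin:            \
    return (__VA_ARGS__ false);

#define MATCH(Name) id == Name ||

static bool Allowed(int builtin, Intrinsic id) {
  switch (builtin) {
    BUILTIN_INTRINSIC_WHITELIST(CASE, MATCH)
    default:
      return false;
  }
}

#undef MATCH
#undef CASE
#undef BUILTIN_INTRINSIC_WHITELIST

int main() {
  std::printf("%d %d\n", Allowed(0, kSetProperty), Allowed(0, kStringAdd));
  // prints "1 0"
}
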
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index 70f3670ee4..a7426eb96e 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -43,8 +43,9 @@ FrameInspector::FrameInspector(StandardFrame* frame, int inlined_frame_index,
js_frame, inlined_frame_index, isolate));
} else if (frame_->is_wasm_interpreter_entry()) {
wasm_interpreted_frame_ =
- summary.AsWasm().wasm_instance()->debug_info()->GetInterpretedFrame(
- frame_->fp(), inlined_frame_index);
+ WasmInterpreterEntryFrame::cast(frame_)
+ ->debug_info()
+ ->GetInterpretedFrame(frame_->fp(), inlined_frame_index);
DCHECK(wasm_interpreted_frame_);
}
}
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index c8c1e76ef2..eef65f5100 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -5,7 +5,6 @@
#ifndef V8_DEBUG_DEBUG_INTERFACE_H_
#define V8_DEBUG_DEBUG_INTERFACE_H_
-#include "include/v8-debug.h"
#include "include/v8-util.h"
#include "include/v8.h"
@@ -181,12 +180,10 @@ class DebugDelegate {
bool is_blackboxed) {}
virtual void ScriptCompiled(v8::Local<Script> script, bool is_live_edited,
bool has_compile_error) {}
- // |break_points_hit| contains installed by JS debug API breakpoint objects.
// |inspector_break_points_hit| contains id of breakpoints installed with
// debug::Script::SetBreakpoint API.
virtual void BreakProgramRequested(
v8::Local<v8::Context> paused_context, v8::Local<v8::Object> exec_state,
- v8::Local<v8::Value> break_points_hit,
const std::vector<debug::BreakpointId>& inspector_break_points_hit) {}
virtual void ExceptionThrown(v8::Local<v8::Context> paused_context,
v8::Local<v8::Object> exec_state,
@@ -500,6 +497,9 @@ int GetNativeAccessorDescriptor(v8::Local<v8::Context> context,
int64_t GetNextRandomInt64(v8::Isolate* isolate);
+v8::MaybeLocal<v8::Value> EvaluateGlobal(v8::Isolate* isolate,
+ v8::Local<v8::String> source);
+
} // namespace debug
} // namespace v8
diff --git a/deps/v8/src/debug/debug-scope-iterator.cc b/deps/v8/src/debug/debug-scope-iterator.cc
index 2e06dccab6..9d843ed17e 100644
--- a/deps/v8/src/debug/debug-scope-iterator.cc
+++ b/deps/v8/src/debug/debug-scope-iterator.cc
@@ -176,8 +176,7 @@ v8::debug::ScopeIterator::ScopeType DebugWasmScopeIterator::GetType() {
v8::Local<v8::Object> DebugWasmScopeIterator::GetObject() {
DCHECK(!Done());
Handle<WasmDebugInfo> debug_info(
- WasmInterpreterEntryFrame::cast(frame_)->wasm_instance()->debug_info(),
- isolate_);
+ WasmInterpreterEntryFrame::cast(frame_)->debug_info(), isolate_);
switch (type_) {
case debug::ScopeIterator::ScopeTypeGlobal:
return Utils::ToLocal(WasmDebugInfo::GetGlobalScopeObject(
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index 77654de635..fda85bd88d 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -121,10 +121,11 @@ void ScopeIterator::TryParseAndRetrieveScopes(ScopeIterator::Option option) {
CollectNonLocals(info.get(), scope);
}
if (!ignore_nested_scopes) {
- DeclarationScope::Analyze(info.get());
- DeclarationScope::AllocateScopeInfos(info.get(), isolate_,
- AnalyzeMode::kDebugger);
- RetrieveScopeChain(scope);
+ if (DeclarationScope::Analyze(info.get())) {
+ DeclarationScope::AllocateScopeInfos(info.get(), isolate_,
+ AnalyzeMode::kDebugger);
+ RetrieveScopeChain(scope);
+ }
}
} else {
// A failed reparse indicates that the preparser has diverged from the
diff --git a/deps/v8/src/debug/debug-type-profile.cc b/deps/v8/src/debug/debug-type-profile.cc
index 6288c11b94..cc30ddee61 100644
--- a/deps/v8/src/debug/debug-type-profile.cc
+++ b/deps/v8/src/debug/debug-type-profile.cc
@@ -50,7 +50,7 @@ std::unique_ptr<TypeProfile> TypeProfile::Collect(Isolate* isolate) {
continue;
}
FeedbackSlot slot = vector->GetTypeProfileSlot();
- CollectTypeProfileNexus nexus(vector, slot);
+ FeedbackNexus nexus(vector, slot);
Handle<String> name(info->DebugName(), isolate);
std::vector<int> source_positions = nexus.GetSourcePositions();
for (int position : source_positions) {
@@ -60,7 +60,7 @@ std::unique_ptr<TypeProfile> TypeProfile::Collect(Isolate* isolate) {
}
// Releases type profile data collected so far.
- nexus.Clear();
+ nexus.ResetTypeProfile();
}
if (!entries->empty()) {
result->emplace_back(type_profile_script);
@@ -91,8 +91,8 @@ void TypeProfile::SelectMode(Isolate* isolate, debug::TypeProfile::Mode mode) {
DCHECK(info->IsSubjectToDebugging());
if (info->feedback_metadata()->HasTypeProfileSlot()) {
FeedbackSlot slot = vector->GetTypeProfileSlot();
- CollectTypeProfileNexus nexus(vector, slot);
- nexus.Clear();
+ FeedbackNexus nexus(vector, slot);
+ nexus.ResetTypeProfile();
}
}
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index c087a0868c..69eaeb6cad 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -12,7 +12,6 @@
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
-#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/compiler.h"
#include "src/debug/debug-evaluate.h"
#include "src/debug/liveedit.h"
@@ -21,6 +20,7 @@
#include "src/frames-inl.h"
#include "src/global-handles.h"
#include "src/globals.h"
+#include "src/interpreter/bytecode-array-accessor.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
#include "src/log.h"
@@ -29,8 +29,6 @@
#include "src/snapshot/natives.h"
#include "src/wasm/wasm-objects-inl.h"
-#include "include/v8-debug.h"
-
namespace v8 {
namespace internal {
@@ -53,6 +51,9 @@ Debug::Debug(Isolate* isolate)
BreakLocation BreakLocation::FromFrame(Handle<DebugInfo> debug_info,
JavaScriptFrame* frame) {
+ if (debug_info->CanBreakAtEntry()) {
+ return BreakLocation(Debug::kBreakAtEntryPosition, DEBUG_BREAK_AT_ENTRY);
+ }
auto summary = FrameSummary::GetTop(frame).AsJavaScript();
int offset = summary.code_offset();
Handle<AbstractCode> abstract_code = summary.abstract_code();
@@ -64,6 +65,7 @@ BreakLocation BreakLocation::FromFrame(Handle<DebugInfo> debug_info,
void BreakLocation::AllAtCurrentStatement(
Handle<DebugInfo> debug_info, JavaScriptFrame* frame,
std::vector<BreakLocation>* result_out) {
+ DCHECK(!debug_info->CanBreakAtEntry());
auto summary = FrameSummary::GetTop(frame).AsJavaScript();
int offset = summary.code_offset();
Handle<AbstractCode> abstract_code = summary.abstract_code();
@@ -81,6 +83,18 @@ void BreakLocation::AllAtCurrentStatement(
}
}
+JSGeneratorObject* BreakLocation::GetGeneratorObjectForSuspendedFrame(
+ JavaScriptFrame* frame) const {
+ DCHECK(IsSuspend());
+ DCHECK_GE(generator_obj_reg_index_, 0);
+
+ Object* generator_obj =
+ InterpretedFrame::cast(frame)->ReadInterpreterRegister(
+ generator_obj_reg_index_);
+
+ return JSGeneratorObject::cast(generator_obj);
+}
+
int BreakLocation::BreakIndexFromCodeOffset(Handle<DebugInfo> debug_info,
Handle<AbstractCode> abstract_code,
int offset) {
@@ -103,13 +117,18 @@ int BreakLocation::BreakIndexFromCodeOffset(Handle<DebugInfo> debug_info,
bool BreakLocation::HasBreakPoint(Handle<DebugInfo> debug_info) const {
// First check whether there is a break point with the same source position.
if (!debug_info->HasBreakPoint(position_)) return false;
- // Then check whether a break point at that source position would have
- // the same code offset. Otherwise it's just a break location that we can
- // step to, but not actually a location where we can put a break point.
- DCHECK(abstract_code_->IsBytecodeArray());
- BreakIterator it(debug_info);
- it.SkipToPosition(position_);
- return it.code_offset() == code_offset_;
+ if (debug_info->CanBreakAtEntry()) {
+ DCHECK_EQ(Debug::kBreakAtEntryPosition, position_);
+ return debug_info->BreakAtEntry();
+ } else {
+ // Then check whether a break point at that source position would have
+ // the same code offset. Otherwise it's just a break location that we can
+ // step to, but not actually a location where we can put a break point.
+ DCHECK(abstract_code_->IsBytecodeArray());
+ BreakIterator it(debug_info);
+ it.SkipToPosition(position_);
+ return it.code_offset() == code_offset_;
+ }
}
debug::BreakLocationType BreakLocation::type() const {
@@ -120,10 +139,12 @@ debug::BreakLocationType BreakLocation::type() const {
return debug::kCallBreakLocation;
case DEBUG_BREAK_SLOT_AT_RETURN:
return debug::kReturnBreakLocation;
+
+ // Externally, suspend breaks should look like normal breaks.
+ case DEBUG_BREAK_SLOT_AT_SUSPEND:
default:
return debug::kCommonBreakLocation;
}
- return debug::kCommonBreakLocation;
}
BreakIterator::BreakIterator(Handle<DebugInfo> debug_info)
@@ -181,10 +202,18 @@ DebugBreakType BreakIterator::GetDebugBreakType() {
interpreter::Bytecode bytecode =
interpreter::Bytecodes::FromByte(bytecode_array->get(code_offset()));
+ // Make sure we read the actual bytecode, not a prefix scaling bytecode.
+ if (interpreter::Bytecodes::IsPrefixScalingBytecode(bytecode)) {
+ bytecode = interpreter::Bytecodes::FromByte(
+ bytecode_array->get(code_offset() + 1));
+ }
+
if (bytecode == interpreter::Bytecode::kDebugger) {
return DEBUGGER_STATEMENT;
} else if (bytecode == interpreter::Bytecode::kReturn) {
return DEBUG_BREAK_SLOT_AT_RETURN;
+ } else if (bytecode == interpreter::Bytecode::kSuspendGenerator) {
+ return DEBUG_BREAK_SLOT_AT_SUSPEND;
} else if (interpreter::Bytecodes::IsCallOrConstruct(bytecode)) {
return DEBUG_BREAK_SLOT_AT_CALL;
} else if (source_position_iterator_.is_statement()) {
@@ -225,7 +254,25 @@ void BreakIterator::ClearDebugBreak() {
BreakLocation BreakIterator::GetBreakLocation() {
Handle<AbstractCode> code(
AbstractCode::cast(debug_info_->DebugBytecodeArray()));
- return BreakLocation(code, GetDebugBreakType(), code_offset(), position_);
+ DebugBreakType type = GetDebugBreakType();
+ int generator_object_reg_index = -1;
+ if (type == DEBUG_BREAK_SLOT_AT_SUSPEND) {
+ // For suspend break, we'll need the generator object to be able to step
+ // over the suspend as if it didn't return. We get the interpreter register
+ // index that holds the generator object by reading it directly off the
+ // bytecode array, and we'll read the actual generator object off the
+ // interpreter stack frame in GetGeneratorObjectForSuspendedFrame.
+ BytecodeArray* bytecode_array = debug_info_->OriginalBytecodeArray();
+ interpreter::BytecodeArrayAccessor accessor(handle(bytecode_array),
+ code_offset());
+
+ DCHECK_EQ(accessor.current_bytecode(),
+ interpreter::Bytecode::kSuspendGenerator);
+ interpreter::Register generator_obj_reg = accessor.GetRegisterOperand(0);
+ generator_object_reg_index = generator_obj_reg.index();
+ }
+ return BreakLocation(code, type, code_offset(), position_,
+ generator_object_reg_index);
}
@@ -276,9 +323,11 @@ char* Debug::RestoreDebug(char* storage) {
int Debug::ArchiveSpacePerThread() { return 0; }
void Debug::Iterate(RootVisitor* v) {
- v->VisitRootPointer(Root::kDebug, &thread_local_.return_value_);
- v->VisitRootPointer(Root::kDebug, &thread_local_.suspended_generator_);
- v->VisitRootPointer(Root::kDebug, &thread_local_.ignore_step_into_function_);
+ v->VisitRootPointer(Root::kDebug, nullptr, &thread_local_.return_value_);
+ v->VisitRootPointer(Root::kDebug, nullptr,
+ &thread_local_.suspended_generator_);
+ v->VisitRootPointer(Root::kDebug, nullptr,
+ &thread_local_.ignore_step_into_function_);
}
DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info) : next_(nullptr) {
@@ -346,7 +395,7 @@ void Debug::Unload() {
debug_context_ = Handle<Context>();
}
-void Debug::Break(JavaScriptFrame* frame) {
+void Debug::Break(JavaScriptFrame* frame, Handle<JSFunction> break_target) {
// Initialize LiveEdit.
LiveEdit::InitializeThreadLocal(this);
@@ -362,8 +411,7 @@ void Debug::Break(JavaScriptFrame* frame) {
DisableBreak no_recursive_break(this);
// Return if we fail to retrieve debug info.
- Handle<JSFunction> function(frame->function());
- Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<SharedFunctionInfo> shared(break_target->shared());
if (!EnsureBreakInfo(shared)) return;
Handle<DebugInfo> debug_info(shared->GetDebugInfo(), isolate_);
@@ -381,6 +429,14 @@ void Debug::Break(JavaScriptFrame* frame) {
return;
}
+ // Debug break at function entry, do not worry about stepping.
+ if (location.IsDebugBreakAtEntry()) {
+ DCHECK(debug_info->BreakAtEntry());
+ return;
+ }
+
+ DCHECK_NOT_NULL(frame);
+
// No break point. Check for stepping.
StepAction step_action = last_step_action();
int current_frame_count = CurrentFrameCount();
@@ -390,7 +446,7 @@ void Debug::Break(JavaScriptFrame* frame) {
// StepOut at not return position was requested and return break locations
// were flooded with one shots.
if (thread_local_.fast_forward_to_return_) {
- DCHECK(location.IsReturn());
+ DCHECK(location.IsReturnOrSuspend());
// We have to ignore recursive calls to function.
if (current_frame_count > target_frame_count) return;
ClearStepping();
@@ -410,8 +466,17 @@ void Debug::Break(JavaScriptFrame* frame) {
case StepNext:
// Step next should not break in a deeper frame than target frame.
if (current_frame_count > target_frame_count) return;
- // Fall through.
+ V8_FALLTHROUGH;
case StepIn: {
+ // Special case "next" and "in" for generators that are about to suspend.
+ if (location.IsSuspend()) {
+ DCHECK(!has_suspended_generator());
+ thread_local_.suspended_generator_ =
+ location.GetGeneratorObjectForSuspendedFrame(frame);
+ ClearStepping();
+ return;
+ }
+
FrameSummary summary = FrameSummary::GetTop(frame);
step_break = step_break || location.IsReturn() ||
current_frame_count != last_frame_count ||
@@ -445,9 +510,9 @@ MaybeHandle<FixedArray> Debug::CheckBreakPoints(Handle<DebugInfo> debug_info,
if (has_break_points) *has_break_points = has_break_points_to_check;
if (!has_break_points_to_check) return {};
- Handle<Object> break_point_objects =
- debug_info->GetBreakPointObjects(location->position());
- return Debug::GetHitBreakPointObjects(break_point_objects);
+ Handle<Object> break_points =
+ debug_info->GetBreakPoints(location->position());
+ return Debug::GetHitBreakPoints(break_points);
}
@@ -502,52 +567,27 @@ MaybeHandle<Object> Debug::CallFunction(const char* name, int argc,
// Check whether a single break point object is triggered.
-bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
- Factory* factory = isolate_->factory();
+bool Debug::CheckBreakPoint(Handle<BreakPoint> break_point) {
HandleScope scope(isolate_);
- // TODO(kozyatinskiy): replace this if by DCHEK once the JS debug API has been
- // removed.
- if (break_point_object->IsBreakPoint()) {
- Handle<BreakPoint> break_point =
- Handle<BreakPoint>::cast(break_point_object);
- if (!break_point->condition()->length()) return true;
- Handle<String> condition(break_point->condition());
- Handle<Object> result;
- // Since we call CheckBreakpoint only for deoptimized frame on top of stack,
- // we can use 0 as index of inlined frame.
- if (!DebugEvaluate::Local(isolate_, break_frame_id(),
- /* inlined_jsframe_index */ 0, condition, false)
- .ToHandle(&result)) {
- if (isolate_->has_pending_exception()) {
- isolate_->clear_pending_exception();
- }
- return false;
- }
- return result->BooleanValue();
- }
-
- // Ignore check if break point object is not a JSObject.
- if (!break_point_object->IsJSObject()) return true;
-
- // Get the break id as an object.
- Handle<Object> break_id = factory->NewNumberFromInt(Debug::break_id());
-
- // Call IsBreakPointTriggered.
- Handle<Object> argv[] = { break_id, break_point_object };
+ if (!break_point->condition()->length()) return true;
+ Handle<String> condition(break_point->condition());
Handle<Object> result;
- if (!CallFunction("IsBreakPointTriggered", arraysize(argv), argv)
+  // Since we call CheckBreakpoint only for the deoptimized frame on top of
+  // the stack, we can use 0 as the index of the inlined frame.
+ if (!DebugEvaluate::Local(isolate_, break_frame_id(),
+ /* inlined_jsframe_index */ 0, condition, false)
.ToHandle(&result)) {
+ if (isolate_->has_pending_exception()) {
+ isolate_->clear_pending_exception();
+ }
return false;
}
-
- // Return whether the break point is triggered.
- return result->IsTrue(isolate_);
+ return result->BooleanValue();
}
-
bool Debug::SetBreakPoint(Handle<JSFunction> function,
- Handle<Object> break_point_object,
+ Handle<BreakPoint> break_point,
int* source_position) {
HandleScope scope(isolate_);
@@ -561,7 +601,7 @@ bool Debug::SetBreakPoint(Handle<JSFunction> function,
// Find the break point and change it.
*source_position = FindBreakablePosition(debug_info, *source_position);
- DebugInfo::SetBreakPoint(debug_info, *source_position, break_point_object);
+ DebugInfo::SetBreakPoint(debug_info, *source_position, break_point);
// At least one active break point now.
DCHECK_LT(0, debug_info->GetBreakPointCount());
@@ -573,13 +613,13 @@ bool Debug::SetBreakPoint(Handle<JSFunction> function,
}
bool Debug::SetBreakPointForScript(Handle<Script> script,
- Handle<Object> break_point_object,
+ Handle<BreakPoint> break_point,
int* source_position) {
if (script->type() == Script::TYPE_WASM) {
Handle<WasmCompiledModule> compiled_module(
WasmCompiledModule::cast(script->wasm_compiled_module()), isolate_);
return WasmCompiledModule::SetBreakPoint(compiled_module, source_position,
- break_point_object);
+ break_point);
}
HandleScope scope(isolate_);
@@ -609,7 +649,7 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
if (breakable_position < *source_position) return false;
*source_position = breakable_position;
- DebugInfo::SetBreakPoint(debug_info, *source_position, break_point_object);
+ DebugInfo::SetBreakPoint(debug_info, *source_position, break_point);
// At least one active break point now.
DCHECK_LT(0, debug_info->GetBreakPointCount());
@@ -622,48 +662,60 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
int Debug::FindBreakablePosition(Handle<DebugInfo> debug_info,
int source_position) {
- DCHECK(debug_info->HasDebugBytecodeArray());
- BreakIterator it(debug_info);
- it.SkipToPosition(source_position);
- return it.position();
+ if (debug_info->CanBreakAtEntry()) {
+ return kBreakAtEntryPosition;
+ } else {
+ DCHECK(debug_info->HasDebugBytecodeArray());
+ BreakIterator it(debug_info);
+ it.SkipToPosition(source_position);
+ return it.position();
+ }
}
void Debug::ApplyBreakPoints(Handle<DebugInfo> debug_info) {
DisallowHeapAllocation no_gc;
- if (debug_info->break_points()->IsUndefined(isolate_)) return;
- FixedArray* break_points = debug_info->break_points();
- for (int i = 0; i < break_points->length(); i++) {
- if (break_points->get(i)->IsUndefined(isolate_)) continue;
- BreakPointInfo* info = BreakPointInfo::cast(break_points->get(i));
- if (info->GetBreakPointCount() == 0) continue;
- DCHECK(debug_info->HasDebugBytecodeArray());
- BreakIterator it(debug_info);
- it.SkipToPosition(info->source_position());
- it.SetDebugBreak();
+ if (debug_info->CanBreakAtEntry()) {
+ debug_info->SetBreakAtEntry();
+ } else {
+ if (!debug_info->HasDebugBytecodeArray()) return;
+ FixedArray* break_points = debug_info->break_points();
+ for (int i = 0; i < break_points->length(); i++) {
+ if (break_points->get(i)->IsUndefined(isolate_)) continue;
+ BreakPointInfo* info = BreakPointInfo::cast(break_points->get(i));
+ if (info->GetBreakPointCount() == 0) continue;
+ DCHECK(debug_info->HasDebugBytecodeArray());
+ BreakIterator it(debug_info);
+ it.SkipToPosition(info->source_position());
+ it.SetDebugBreak();
+ }
}
}
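
ApplyBreakPoints now has two modes: functions flagged as break-at-entry get a single entry hook, while ordinary functions still patch a debug break into every position that carries a breakpoint. A rough standalone model of that split (the struct and names below are illustrative, not V8 types):

    #include <set>
    #include <vector>

    struct DebugInfoSketch {
      bool can_break_at_entry = false;
      bool break_at_entry_set = false;
      std::vector<int> break_point_positions;  // positions carrying breakpoints
      std::set<int> patched_offsets;           // bytecode offsets with debug break
    };

    void ApplyBreakPointsSketch(DebugInfoSketch* info) {
      if (info->can_break_at_entry) {
        info->break_at_entry_set = true;       // debug_info->SetBreakAtEntry()
        return;
      }
      for (int position : info->break_point_positions) {
        info->patched_offsets.insert(position);  // BreakIterator + SetDebugBreak()
      }
    }
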
void Debug::ClearBreakPoints(Handle<DebugInfo> debug_info) {
- // If we attempt to clear breakpoints but none exist, simply return. This can
- // happen e.g. CoverageInfos exit but no breakpoints are set.
- if (!debug_info->HasDebugBytecodeArray()) return;
+ if (debug_info->CanBreakAtEntry()) {
+ debug_info->ClearBreakAtEntry();
+ } else {
+ // If we attempt to clear breakpoints but none exist, simply return. This
+ // can happen e.g. CoverageInfos exist but no breakpoints are set.
+ if (!debug_info->HasDebugBytecodeArray()) return;
- DisallowHeapAllocation no_gc;
- for (BreakIterator it(debug_info); !it.Done(); it.Next()) {
- it.ClearDebugBreak();
+ DisallowHeapAllocation no_gc;
+ for (BreakIterator it(debug_info); !it.Done(); it.Next()) {
+ it.ClearDebugBreak();
+ }
}
}
-void Debug::ClearBreakPoint(Handle<Object> break_point_object) {
+void Debug::ClearBreakPoint(Handle<BreakPoint> break_point) {
HandleScope scope(isolate_);
for (DebugInfoListNode* node = debug_info_list_; node != nullptr;
node = node->next()) {
Handle<Object> result =
- DebugInfo::FindBreakPointInfo(node->debug_info(), break_point_object);
+ DebugInfo::FindBreakPointInfo(node->debug_info(), break_point);
if (result->IsUndefined(isolate_)) continue;
Handle<DebugInfo> debug_info = node->debug_info();
- if (DebugInfo::ClearBreakPoint(debug_info, break_point_object)) {
+ if (DebugInfo::ClearBreakPoint(debug_info, break_point)) {
ClearBreakPoints(debug_info);
if (debug_info->GetBreakPointCount() == 0) {
RemoveBreakInfoAndMaybeFree(debug_info);
@@ -707,7 +759,7 @@ void Debug::FloodWithOneShot(Handle<SharedFunctionInfo> shared,
// Flood the function with break points.
DCHECK(debug_info->HasDebugBytecodeArray());
for (BreakIterator it(debug_info); !it.Done(); it.Next()) {
- if (returns_only && !it.GetBreakLocation().IsReturn()) continue;
+ if (returns_only && !it.GetBreakLocation().IsReturnOrSuspend()) continue;
it.SetDebugBreak();
}
}
@@ -729,25 +781,24 @@ bool Debug::IsBreakOnException(ExceptionBreakType type) {
}
}
-MaybeHandle<FixedArray> Debug::GetHitBreakPointObjects(
- Handle<Object> break_point_objects) {
- DCHECK(!break_point_objects->IsUndefined(isolate_));
- if (!break_point_objects->IsFixedArray()) {
- if (!CheckBreakPoint(break_point_objects)) return {};
+MaybeHandle<FixedArray> Debug::GetHitBreakPoints(Handle<Object> break_points) {
+ DCHECK(!break_points->IsUndefined(isolate_));
+ if (!break_points->IsFixedArray()) {
+ if (!CheckBreakPoint(Handle<BreakPoint>::cast(break_points))) return {};
Handle<FixedArray> break_points_hit = isolate_->factory()->NewFixedArray(1);
- break_points_hit->set(0, *break_point_objects);
+ break_points_hit->set(0, *break_points);
return break_points_hit;
}
- Handle<FixedArray> array(FixedArray::cast(*break_point_objects));
+ Handle<FixedArray> array(FixedArray::cast(*break_points));
int num_objects = array->length();
Handle<FixedArray> break_points_hit =
isolate_->factory()->NewFixedArray(num_objects);
int break_points_hit_count = 0;
for (int i = 0; i < num_objects; ++i) {
- Handle<Object> break_point_object(array->get(i), isolate_);
- if (CheckBreakPoint(break_point_object)) {
- break_points_hit->set(break_points_hit_count++, *break_point_object);
+ Handle<Object> break_point(array->get(i), isolate_);
+ if (CheckBreakPoint(Handle<BreakPoint>::cast(break_point))) {
+ break_points_hit->set(break_points_hit_count++, *break_point);
}
}
if (break_points_hit_count == 0) return {};
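
GetHitBreakPoints can now assume every entry is a BreakPoint, so it simply filters the installed set down to the breakpoints whose condition holds and returns nothing if the result is empty. A rough standalone equivalent, with std::vector in place of FixedArray and a caller-supplied predicate in place of CheckBreakPoint:

    #include <functional>
    #include <optional>
    #include <string>
    #include <vector>

    struct BreakPointSketch { int id; std::string condition; };

    std::optional<std::vector<BreakPointSketch>> GetHitBreakPointsSketch(
        const std::vector<BreakPointSketch>& installed,
        const std::function<bool(const BreakPointSketch&)>& triggered) {
      std::vector<BreakPointSketch> hit;
      for (const BreakPointSketch& bp : installed) {
        if (triggered(bp)) hit.push_back(bp);  // CheckBreakPoint(bp)
      }
      if (hit.empty()) return std::nullopt;    // empty MaybeHandle<FixedArray>
      return hit;
    }
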
@@ -824,11 +875,10 @@ void Debug::PrepareStepOnThrow() {
if (summaries.size() > 1) {
Handle<AbstractCode> code = summary.AsJavaScript().abstract_code();
CHECK_EQ(AbstractCode::INTERPRETED_FUNCTION, code->kind());
- BytecodeArray* bytecode = code->GetBytecodeArray();
- HandlerTable* table = HandlerTable::cast(bytecode->handler_table());
+ HandlerTable table(code->GetBytecodeArray());
int code_offset = summary.code_offset();
HandlerTable::CatchPrediction prediction;
- int index = table->LookupRange(code_offset, nullptr, &prediction);
+ int index = table.LookupRange(code_offset, nullptr, &prediction);
if (index > 0) found_handler = true;
} else {
found_handler = true;
@@ -879,7 +929,7 @@ void Debug::PrepareStep(StepAction step_action) {
if (frame->is_wasm_compiled()) return;
WasmInterpreterEntryFrame* wasm_frame =
WasmInterpreterEntryFrame::cast(frame);
- wasm_frame->wasm_instance()->debug_info()->PrepareStep(step_action);
+ wasm_frame->debug_info()->PrepareStep(step_action);
return;
}
@@ -895,9 +945,9 @@ void Debug::PrepareStep(StepAction step_action) {
BreakLocation location = BreakLocation::FromFrame(debug_info, js_frame);
- // Any step at a return is a step-out and we need to schedule DebugOnFunction
- // call callback.
- if (location.IsReturn()) {
+ // Any step at a return is a step-out, and a step-out at a suspend behaves
+ // like a return.
+ if (location.IsReturn() || (location.IsSuspend() && step_action == StepOut)) {
// On StepOut we'll ignore our further calls to current function in
// PrepareStepIn callback.
if (last_step_action() == StepOut) {
@@ -906,6 +956,8 @@ void Debug::PrepareStep(StepAction step_action) {
step_action = StepOut;
thread_local_.last_step_action_ = StepIn;
}
+
+ // We need to schedule the DebugOnFunctionCall callback.
UpdateHookOnFunctionCall();
// A step-next in blackboxed function is a step-out.
@@ -926,7 +978,7 @@ void Debug::PrepareStep(StepAction step_action) {
// Clear last position info. For stepping out it does not matter.
thread_local_.last_statement_position_ = kNoSourcePosition;
thread_local_.last_frame_count_ = -1;
- if (!location.IsReturn() && !IsBlackboxed(shared)) {
+ if (!location.IsReturnOrSuspend() && !IsBlackboxed(shared)) {
// At not return position we flood return positions with one shots and
// will repeat StepOut automatically at next break.
thread_local_.target_frame_count_ = current_frame_count;
@@ -966,7 +1018,7 @@ void Debug::PrepareStep(StepAction step_action) {
}
case StepNext:
thread_local_.target_frame_count_ = current_frame_count;
- // Fall through.
+ V8_FALLTHROUGH;
case StepIn:
// TODO(clemensh): Implement stepping from JS into wasm.
FloodWithOneShot(shared);
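
The "// Fall through." comment above becomes V8_FALLTHROUGH, which typically expands to the [[fallthrough]] attribute so the compiler can verify the intent. A tiny illustration of the pattern outside V8 (the function and values are made up):

    enum StepAction { StepOut, StepNext, StepIn };

    int OneShotDepth(StepAction action) {
      int depth = 0;
      switch (action) {
        case StepNext:
          depth = 1;         // work that only StepNext needs
          [[fallthrough]];   // deliberate: StepNext then shares StepIn's path
        case StepIn:
          depth += 1;
          break;
        case StepOut:
          break;
      }
      return depth;
    }
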
@@ -1060,10 +1112,7 @@ class RedirectActiveFunctions : public ThreadVisitor {
void Debug::DeoptimizeFunction(Handle<SharedFunctionInfo> shared) {
// Deoptimize all code compiled from this shared function info including
// inlining.
- if (isolate_->concurrent_recompilation_enabled()) {
- isolate_->optimizing_compile_dispatcher()->Flush(
- OptimizingCompileDispatcher::BlockingBehavior::kBlock);
- }
+ isolate_->AbortConcurrentOptimization(BlockingBehavior::kBlock);
// Make sure we abort incremental marking.
isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
@@ -1094,11 +1143,16 @@ void Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
Handle<DebugInfo> debug_info = GetOrCreateDebugInfo(shared);
if (debug_info->IsPreparedForBreakpoints()) return;
- DeoptimizeFunction(shared);
- // Update PCs on the stack to point to recompiled code.
- RedirectActiveFunctions redirect_visitor(*shared);
- redirect_visitor.VisitThread(isolate_, isolate_->thread_local_top());
- isolate_->thread_manager()->IterateArchivedThreads(&redirect_visitor);
+ if (debug_info->CanBreakAtEntry()) {
+ // Deopt everything in case the function is inlined anywhere.
+ Deoptimizer::DeoptimizeAll(isolate_);
+ } else {
+ DeoptimizeFunction(shared);
+ // Update PCs on the stack to point to recompiled code.
+ RedirectActiveFunctions redirect_visitor(*shared);
+ redirect_visitor.VisitThread(isolate_, isolate_->thread_local_top());
+ isolate_->thread_manager()->IterateArchivedThreads(&redirect_visitor);
+ }
debug_info->set_flags(debug_info->flags() |
DebugInfo::kPreparedForBreakpoints);
@@ -1184,19 +1238,6 @@ bool Debug::GetPossibleBreakpoints(Handle<Script> script, int start_position,
UNREACHABLE();
}
-void Debug::RecordGenerator(Handle<JSGeneratorObject> generator_object) {
- if (last_step_action() <= StepOut) return;
-
- if (last_step_action() == StepNext) {
- // Only consider this generator a step-next target if not stepping in.
- if (thread_local_.target_frame_count_ < CurrentFrameCount()) return;
- }
-
- DCHECK(!has_suspended_generator());
- thread_local_.suspended_generator_ = *generator_object;
- ClearStepping();
-}
-
class SharedFunctionInfoFinder {
public:
explicit SharedFunctionInfoFinder(int target_position)
@@ -1300,7 +1341,9 @@ Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
bool Debug::EnsureBreakInfo(Handle<SharedFunctionInfo> shared) {
// Return if we already have the break info for shared.
if (shared->HasBreakInfo()) return true;
- if (!shared->IsSubjectToDebugging()) return false;
+ if (!shared->IsSubjectToDebugging() && !CanBreakAtEntry(shared)) {
+ return false;
+ }
if (!shared->is_compiled() &&
!Compiler::Compile(shared, Compiler::CLEAR_EXCEPTION)) {
return false;
@@ -1328,7 +1371,10 @@ void Debug::CreateBreakInfo(Handle<SharedFunctionInfo> shared) {
maybe_debug_bytecode_array = factory->CopyBytecodeArray(original);
}
- debug_info->set_flags(debug_info->flags() | DebugInfo::kHasBreakInfo);
+ int flags = debug_info->flags();
+ flags |= DebugInfo::kHasBreakInfo;
+ if (CanBreakAtEntry(shared)) flags |= DebugInfo::kCanBreakAtEntry;
+ debug_info->set_flags(flags);
debug_info->set_debug_bytecode_array(*maybe_debug_bytecode_array);
debug_info->set_break_points(*break_points);
}
@@ -1494,14 +1540,6 @@ MaybeHandle<Object> Debug::MakeExecutionState() {
}
-MaybeHandle<Object> Debug::MakeBreakEvent(Handle<Object> break_points_hit) {
- // Create the new break event object.
- Handle<Object> argv[] = { isolate_->factory()->NewNumberFromInt(break_id()),
- break_points_hit };
- return CallFunction("MakeBreakEvent", arraysize(argv), argv);
-}
-
-
MaybeHandle<Object> Debug::MakeExceptionEvent(Handle<Object> exception,
bool uncaught,
Handle<Object> promise) {
@@ -1677,29 +1715,15 @@ void Debug::OnDebugBreak(Handle<FixedArray> break_points_hit) {
int inspector_break_points_count = 0;
// This array contains breakpoints installed using JS debug API.
for (int i = 0; i < break_points_hit->length(); ++i) {
- Object* break_point = break_points_hit->get(i);
- if (break_point->IsBreakPoint()) {
- inspector_break_points_hit.push_back(BreakPoint::cast(break_point)->id());
- ++inspector_break_points_count;
- } else {
- break_points_hit->set(i - inspector_break_points_count, break_point);
- }
- }
- int break_points_length =
- break_points_hit->length() - inspector_break_points_count;
- Handle<Object> break_points;
- if (break_points_length) {
- break_points_hit->Shrink(break_points_length);
- break_points = isolate_->factory()->NewJSArrayWithElements(
- break_points_hit, PACKED_ELEMENTS, break_points_length);
- } else {
- break_points = isolate_->factory()->undefined_value();
+ BreakPoint* break_point = BreakPoint::cast(break_points_hit->get(i));
+ inspector_break_points_hit.push_back(break_point->id());
+ ++inspector_break_points_count;
}
debug_delegate_->BreakProgramRequested(
GetDebugEventContext(isolate_),
v8::Utils::ToLocal(Handle<JSObject>::cast(exec_state)),
- v8::Utils::ToLocal(break_points), inspector_break_points_hit);
+ inspector_break_points_hit);
}
@@ -1860,6 +1884,16 @@ bool Debug::AllFramesOnStackAreBlackboxed() {
return true;
}
+bool Debug::CanBreakAtEntry(Handle<SharedFunctionInfo> shared) {
+ // Allow break at entry for builtin functions.
+ if (shared->native()) {
+ // Functions that are subject to debugging can have regular breakpoints.
+ DCHECK(!shared->IsSubjectToDebugging());
+ return true;
+ }
+ return false;
+}
+
bool Debug::SetScriptSource(Handle<Script> script, Handle<String> source,
bool preview, bool* stack_changed) {
DebugScope debug_scope(this);
@@ -1971,6 +2005,7 @@ void Debug::UpdateHookOnFunctionCall() {
STATIC_ASSERT(LastStepAction == StepIn);
hook_on_function_call_ = thread_local_.last_step_action_ == StepIn ||
isolate_->needs_side_effect_check();
+ DCHECK_IMPLIES(hook_on_function_call_, is_active_);
}
MaybeHandle<Object> Debug::Call(Handle<Object> fun, Handle<Object> data) {
@@ -2146,7 +2181,6 @@ bool Debug::PerformSideEffectCheck(Handle<JSFunction> function) {
!Compiler::Compile(function, Compiler::KEEP_EXCEPTION)) {
return false;
}
- Deoptimizer::DeoptimizeFunction(*function);
if (!SharedFunctionInfo::HasNoSideEffect(handle(function->shared()))) {
if (FLAG_trace_side_effect_free_debug_evaluate) {
PrintF("[debug-evaluate] Function %s failed side effect check.\n",
@@ -2195,16 +2229,9 @@ void LegacyDebugDelegate::ScriptCompiled(v8::Local<v8::debug::Script> script,
void LegacyDebugDelegate::BreakProgramRequested(
v8::Local<v8::Context> paused_context, v8::Local<v8::Object> exec_state,
- v8::Local<v8::Value> break_points_hit,
const std::vector<debug::BreakpointId>&) {
- Handle<Object> event_data;
- if (isolate_->debug()
- ->MakeBreakEvent(v8::Utils::OpenHandle(*break_points_hit))
- .ToHandle(&event_data)) {
- ProcessDebugEvent(
- v8::Break, Handle<JSObject>::cast(event_data),
- Handle<JSObject>::cast(v8::Utils::OpenHandle(*exec_state)));
- }
+ ProcessDebugEvent(v8::Break, isolate_->factory()->NewJSObjectWithNullProto(),
+ Handle<JSObject>::cast(v8::Utils::OpenHandle(*exec_state)));
}
void LegacyDebugDelegate::ExceptionThrown(v8::Local<v8::Context> paused_context,
@@ -2231,32 +2258,6 @@ void LegacyDebugDelegate::ProcessDebugEvent(v8::DebugEvent event,
}
}
-JavaScriptDebugDelegate::JavaScriptDebugDelegate(Isolate* isolate,
- Handle<JSFunction> listener,
- Handle<Object> data)
- : LegacyDebugDelegate(isolate) {
- GlobalHandles* global_handles = isolate->global_handles();
- listener_ = global_handles->Create(*listener);
- data_ = global_handles->Create(*data);
-}
-
-JavaScriptDebugDelegate::~JavaScriptDebugDelegate() {
- GlobalHandles::Destroy(Handle<Object>::cast(listener_).location());
- GlobalHandles::Destroy(data_.location());
-}
-
-void JavaScriptDebugDelegate::ProcessDebugEvent(v8::DebugEvent event,
- Handle<JSObject> event_data,
- Handle<JSObject> exec_state) {
- AllowJavascriptExecutionDebugOnly allow_script(isolate_);
- Handle<Object> argv[] = {Handle<Object>(Smi::FromInt(event), isolate_),
- exec_state, event_data, data_};
- Handle<JSReceiver> global = isolate_->global_proxy();
- // Listener must not throw.
- Execution::Call(isolate_, listener_, global, arraysize(argv), argv)
- .ToHandleChecked();
-}
-
NativeDebugDelegate::NativeDebugDelegate(Isolate* isolate,
v8::Debug::EventCallback callback,
Handle<Object> data)
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index aec66f2f35..4ea9c2b872 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -25,12 +25,9 @@
#include "src/string-stream.h"
#include "src/v8threads.h"
-#include "include/v8-debug.h"
-
namespace v8 {
namespace internal {
-
// Forward declarations.
class DebugScope;
@@ -51,13 +48,14 @@ enum ExceptionBreakType {
BreakUncaughtException = 1
};
-
enum DebugBreakType {
NOT_DEBUG_BREAK,
DEBUGGER_STATEMENT,
DEBUG_BREAK_SLOT,
DEBUG_BREAK_SLOT_AT_CALL,
DEBUG_BREAK_SLOT_AT_RETURN,
+ DEBUG_BREAK_SLOT_AT_SUSPEND,
+ DEBUG_BREAK_AT_ENTRY,
};
enum IgnoreBreakMode {
@@ -74,12 +72,20 @@ class BreakLocation {
JavaScriptFrame* frame,
std::vector<BreakLocation>* result_out);
+ inline bool IsSuspend() const { return type_ == DEBUG_BREAK_SLOT_AT_SUSPEND; }
inline bool IsReturn() const { return type_ == DEBUG_BREAK_SLOT_AT_RETURN; }
+ inline bool IsReturnOrSuspend() const {
+ return type_ >= DEBUG_BREAK_SLOT_AT_RETURN;
+ }
inline bool IsCall() const { return type_ == DEBUG_BREAK_SLOT_AT_CALL; }
inline bool IsDebugBreakSlot() const { return type_ >= DEBUG_BREAK_SLOT; }
inline bool IsDebuggerStatement() const {
return type_ == DEBUGGER_STATEMENT;
}
+ inline bool IsDebugBreakAtEntry() const {
+ return type_ == DEBUG_BREAK_AT_ENTRY;
+ }
bool HasBreakPoint(Handle<DebugInfo> debug_info) const;
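
IsReturnOrSuspend above relies on the enum layout: DEBUG_BREAK_SLOT_AT_SUSPEND sits directly after DEBUG_BREAK_SLOT_AT_RETURN, so one >= comparison covers both (and, as laid out, DEBUG_BREAK_AT_ENTRY as well). A small sketch that mirrors the ordering and makes the dependency explicit with a static_assert (a copy for illustration, not V8's header):

    enum DebugBreakType {
      NOT_DEBUG_BREAK,
      DEBUGGER_STATEMENT,
      DEBUG_BREAK_SLOT,
      DEBUG_BREAK_SLOT_AT_CALL,
      DEBUG_BREAK_SLOT_AT_RETURN,
      DEBUG_BREAK_SLOT_AT_SUSPEND,
      DEBUG_BREAK_AT_ENTRY,
    };

    static_assert(DEBUG_BREAK_SLOT_AT_SUSPEND > DEBUG_BREAK_SLOT_AT_RETURN,
                  "IsReturnOrSuspend relies on this ordering");

    // Note: with this layout DEBUG_BREAK_AT_ENTRY also satisfies the predicate.
    inline bool IsReturnOrSuspend(DebugBreakType type) {
      return type >= DEBUG_BREAK_SLOT_AT_RETURN;
    }
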
@@ -87,16 +93,26 @@ class BreakLocation {
debug::BreakLocationType type() const;
+ JSGeneratorObject* GetGeneratorObjectForSuspendedFrame(
+ JavaScriptFrame* frame) const;
+
private:
BreakLocation(Handle<AbstractCode> abstract_code, DebugBreakType type,
- int code_offset, int position)
+ int code_offset, int position, int generator_obj_reg_index)
: abstract_code_(abstract_code),
code_offset_(code_offset),
type_(type),
- position_(position) {
+ position_(position),
+ generator_obj_reg_index_(generator_obj_reg_index) {
DCHECK_NE(NOT_DEBUG_BREAK, type_);
}
+ BreakLocation(int position, DebugBreakType type)
+ : code_offset_(0),
+ type_(type),
+ position_(position),
+ generator_obj_reg_index_(0) {}
+
static int BreakIndexFromCodeOffset(Handle<DebugInfo> debug_info,
Handle<AbstractCode> abstract_code,
int offset);
@@ -108,6 +124,7 @@ class BreakLocation {
int code_offset_;
DebugBreakType type_;
int position_;
+ int generator_obj_reg_index_;
friend class BreakIterator;
};
@@ -215,19 +232,20 @@ class Debug {
// Internal logic
bool Load();
- void Break(JavaScriptFrame* frame);
+ // The break target may not be the top-most frame, since we may be
+ // breaking before entering a function that cannot contain break points.
+ void Break(JavaScriptFrame* frame, Handle<JSFunction> break_target);
// Scripts handling.
Handle<FixedArray> GetLoadedScripts();
// Break point handling.
bool SetBreakPoint(Handle<JSFunction> function,
- Handle<Object> break_point_object,
- int* source_position);
+ Handle<BreakPoint> break_point, int* source_position);
bool SetBreakPointForScript(Handle<Script> script,
- Handle<Object> break_point_object,
+ Handle<BreakPoint> break_point,
int* source_position);
- void ClearBreakPoint(Handle<Object> break_point_object);
+ void ClearBreakPoint(Handle<BreakPoint> break_point);
void ChangeBreakOnException(ExceptionBreakType type, bool enable);
bool IsBreakOnException(ExceptionBreakType type);
@@ -235,12 +253,11 @@ class Debug {
int* offset, int* id);
void RemoveBreakpoint(int id);
- // The parameter is either a BreakPointInfo object, or a FixedArray of
- // BreakPointInfo objects.
+ // The parameter is either a BreakPoint object, or a FixedArray of
+ // BreakPoint objects.
// Returns an empty handle if no breakpoint is hit, or a FixedArray with all
- // hit breakpoints.
- MaybeHandle<FixedArray> GetHitBreakPointObjects(
- Handle<Object> break_point_objects);
+ // hit BreakPoint objects.
+ MaybeHandle<FixedArray> GetHitBreakPoints(Handle<Object> break_points);
// Stepping handling.
void PrepareStep(StepAction step_action);
@@ -256,8 +273,6 @@ class Debug {
int end_position, bool restrict_to_function,
std::vector<BreakLocation>* locations);
- void RecordGenerator(Handle<JSGeneratorObject> generator_object);
-
void RunPromiseHook(PromiseHookType hook_type, Handle<JSPromise> promise,
Handle<Object> parent);
@@ -265,6 +280,8 @@ class Debug {
bool IsBlackboxed(Handle<SharedFunctionInfo> shared);
+ bool CanBreakAtEntry(Handle<SharedFunctionInfo> shared);
+
void SetDebugDelegate(debug::DebugDelegate* delegate, bool pass_ownership);
// Returns whether the operation succeeded.
@@ -339,6 +356,10 @@ class Debug {
inline bool in_debug_scope() const {
return !!base::Relaxed_Load(&thread_local_.current_debug_scope_);
}
+ inline bool needs_check_on_function_call() const {
+ return hook_on_function_call_;
+ }
+
void set_break_points_active(bool v) { break_points_active_ = v; }
bool break_points_active() const { return break_points_active_; }
@@ -376,6 +397,10 @@ class Debug {
DebugFeatureTracker* feature_tracker() { return &feature_tracker_; }
+ // For functions in which we cannot set a break point, use a canonical
+ // source position for break points.
+ static const int kBreakAtEntryPosition = 0;
+
private:
explicit Debug(Isolate* isolate);
~Debug() { DCHECK_NULL(debug_delegate_); }
@@ -410,8 +435,6 @@ class Debug {
// Constructors for debug event objects.
MUST_USE_RESULT MaybeHandle<Object> MakeExecutionState();
- MUST_USE_RESULT MaybeHandle<Object> MakeBreakEvent(
- Handle<Object> break_points_hit);
MUST_USE_RESULT MaybeHandle<Object> MakeExceptionEvent(
Handle<Object> exception,
bool uncaught,
@@ -445,7 +468,7 @@ class Debug {
BreakLocation* location,
bool* has_break_points = nullptr);
bool IsMutedAtCurrentLocation(JavaScriptFrame* frame);
- bool CheckBreakPoint(Handle<Object> break_point_object);
+ bool CheckBreakPoint(Handle<BreakPoint> break_point);
MaybeHandle<Object> CallFunction(const char* name, int argc,
Handle<Object> args[],
bool catch_exceptions = true);
@@ -577,7 +600,6 @@ class LegacyDebugDelegate : public v8::debug::DebugDelegate {
bool has_compile_error) override;
void BreakProgramRequested(v8::Local<v8::Context> paused_context,
v8::Local<v8::Object> exec_state,
- v8::Local<v8::Value> break_points_hit,
const std::vector<debug::BreakpointId>&) override;
void ExceptionThrown(v8::Local<v8::Context> paused_context,
v8::Local<v8::Object> exec_state,
@@ -599,20 +621,6 @@ class LegacyDebugDelegate : public v8::debug::DebugDelegate {
Handle<JSObject> exec_state) = 0;
};
-class JavaScriptDebugDelegate : public LegacyDebugDelegate {
- public:
- JavaScriptDebugDelegate(Isolate* isolate, Handle<JSFunction> listener,
- Handle<Object> data);
- virtual ~JavaScriptDebugDelegate();
-
- private:
- void ProcessDebugEvent(v8::DebugEvent event, Handle<JSObject> event_data,
- Handle<JSObject> exec_state) override;
-
- Handle<JSFunction> listener_;
- Handle<Object> data_;
-};
-
class NativeDebugDelegate : public LegacyDebugDelegate {
public:
NativeDebugDelegate(Isolate* isolate, v8::Debug::EventCallback callback,
@@ -630,7 +638,6 @@ class NativeDebugDelegate : public LegacyDebugDelegate {
virtual v8::Local<v8::Object> GetEventData() const;
virtual v8::Local<v8::Context> GetEventContext() const;
virtual v8::Local<v8::Value> GetCallbackData() const;
- virtual v8::Debug::ClientData* GetClientData() const { return nullptr; }
virtual v8::Isolate* GetIsolate() const;
private:
diff --git a/deps/v8/src/debug/debug.js b/deps/v8/src/debug/debug.js
index 43be3c424a..97a0886ca7 100644
--- a/deps/v8/src/debug/debug.js
+++ b/deps/v8/src/debug/debug.js
@@ -19,16 +19,8 @@ var ValueMirror = global.ValueMirror;
//----------------------------------------------------------------------------
-// Default number of frames to include in the response to backtrace request.
-var kDefaultBacktraceLength = 10;
-
var Debug = {};
-// Regular expression to skip "crud" at the beginning of a source line which is
-// not really code. Currently the regular expression matches whitespace and
-// comments.
-var sourceLineBeginningSkip = /^(?:\s*(?:\/\*.*?\*\/)*)*/;
-
// Debug events which can occur in the V8 JavaScript engine. These originate
// from the API include file debug.h.
Debug.DebugEvent = { Break: 1,
@@ -58,29 +50,12 @@ Debug.ScriptCompilationType = { Host: 0,
Eval: 1,
JSON: 2 };
-// The different script break point types.
-Debug.ScriptBreakPointType = { ScriptId: 0,
- ScriptName: 1,
- ScriptRegExp: 2 };
-
function ScriptTypeFlag(type) {
return (1 << type);
}
// Globals.
-var next_response_seq = 0;
-var next_break_point_number = 1;
-var break_points = [];
-var script_break_points = [];
var debugger_flags = {
- breakPointsActive: {
- value: true,
- getValue: function() { return this.value; },
- setValue: function(value) {
- this.value = !!value;
- %SetBreakPointsActive(this.value);
- }
- },
breakOnCaughtException: {
getValue: function() { return Debug.isBreakOnException(); },
setValue: function(value) {
@@ -104,308 +79,6 @@ var debugger_flags = {
};
-// Create a new break point object and add it to the list of break points.
-function MakeBreakPoint(source_position, opt_script_break_point) {
- var break_point = new BreakPoint(source_position, opt_script_break_point);
- break_points.push(break_point);
- return break_point;
-}
-
-
-// Object representing a break point.
-// NOTE: This object does not have a reference to the function having break
-// point as this would cause function not to be garbage collected when it is
-// not used any more. We do not want break points to keep functions alive.
-function BreakPoint(source_position, opt_script_break_point) {
- this.source_position_ = source_position;
- if (opt_script_break_point) {
- this.script_break_point_ = opt_script_break_point;
- } else {
- this.number_ = next_break_point_number++;
- }
- this.active_ = true;
- this.condition_ = null;
-}
-
-
-BreakPoint.prototype.number = function() {
- return this.number_;
-};
-
-
-BreakPoint.prototype.func = function() {
- return this.func_;
-};
-
-
-BreakPoint.prototype.source_position = function() {
- return this.source_position_;
-};
-
-
-BreakPoint.prototype.active = function() {
- if (this.script_break_point()) {
- return this.script_break_point().active();
- }
- return this.active_;
-};
-
-
-BreakPoint.prototype.condition = function() {
- if (this.script_break_point() && this.script_break_point().condition()) {
- return this.script_break_point().condition();
- }
- return this.condition_;
-};
-
-
-BreakPoint.prototype.script_break_point = function() {
- return this.script_break_point_;
-};
-
-
-BreakPoint.prototype.enable = function() {
- this.active_ = true;
-};
-
-
-BreakPoint.prototype.disable = function() {
- this.active_ = false;
-};
-
-
-BreakPoint.prototype.setCondition = function(condition) {
- this.condition_ = condition;
-};
-
-
-BreakPoint.prototype.isTriggered = function(exec_state) {
- // Break point not active - not triggered.
- if (!this.active()) return false;
-
- // Check for conditional break point.
- if (this.condition()) {
- // If break point has condition try to evaluate it in the top frame.
- try {
- var mirror = exec_state.frame(0).evaluate(this.condition());
- // If no sensible mirror or non true value break point not triggered.
- if (!(mirror instanceof ValueMirror) || !mirror.value_) {
- return false;
- }
- } catch (e) {
- // Exception evaluating condition counts as not triggered.
- return false;
- }
- }
-
- // Break point triggered.
- return true;
-};
-
-
-// Function called from the runtime when a break point is hit. Returns true if
-// the break point is triggered and supposed to break execution.
-function IsBreakPointTriggered(break_id, break_point) {
- return break_point.isTriggered(MakeExecutionState(break_id));
-}
-
-
-// Object representing a script break point. The script is referenced by its
-// script name or script id and the break point is represented as line and
-// column.
-function ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
- opt_groupId) {
- this.type_ = type;
- if (type == Debug.ScriptBreakPointType.ScriptId) {
- this.script_id_ = script_id_or_name;
- } else if (type == Debug.ScriptBreakPointType.ScriptName) {
- this.script_name_ = script_id_or_name;
- } else if (type == Debug.ScriptBreakPointType.ScriptRegExp) {
- this.script_regexp_object_ = new GlobalRegExp(script_id_or_name);
- } else {
- throw %make_error(kDebugger, "Unexpected breakpoint type " + type);
- }
- this.line_ = opt_line || 0;
- this.column_ = opt_column;
- this.groupId_ = opt_groupId;
- this.active_ = true;
- this.condition_ = null;
- this.break_points_ = [];
-}
-
-
-ScriptBreakPoint.prototype.number = function() {
- return this.number_;
-};
-
-
-ScriptBreakPoint.prototype.groupId = function() {
- return this.groupId_;
-};
-
-
-ScriptBreakPoint.prototype.type = function() {
- return this.type_;
-};
-
-
-ScriptBreakPoint.prototype.script_id = function() {
- return this.script_id_;
-};
-
-
-ScriptBreakPoint.prototype.script_name = function() {
- return this.script_name_;
-};
-
-
-ScriptBreakPoint.prototype.script_regexp_object = function() {
- return this.script_regexp_object_;
-};
-
-
-ScriptBreakPoint.prototype.line = function() {
- return this.line_;
-};
-
-
-ScriptBreakPoint.prototype.column = function() {
- return this.column_;
-};
-
-
-ScriptBreakPoint.prototype.actual_locations = function() {
- var locations = [];
- for (var i = 0; i < this.break_points_.length; i++) {
- locations.push(this.break_points_[i].actual_location);
- }
- return locations;
-};
-
-
-ScriptBreakPoint.prototype.update_positions = function(line, column) {
- this.line_ = line;
- this.column_ = column;
-};
-
-
-ScriptBreakPoint.prototype.active = function() {
- return this.active_;
-};
-
-
-ScriptBreakPoint.prototype.condition = function() {
- return this.condition_;
-};
-
-
-ScriptBreakPoint.prototype.enable = function() {
- this.active_ = true;
-};
-
-
-ScriptBreakPoint.prototype.disable = function() {
- this.active_ = false;
-};
-
-
-ScriptBreakPoint.prototype.setCondition = function(condition) {
- this.condition_ = condition;
-};
-
-
-// Check whether a script matches this script break point. Currently this is
-// only based on script name.
-ScriptBreakPoint.prototype.matchesScript = function(script) {
- if (this.type_ == Debug.ScriptBreakPointType.ScriptId) {
- return this.script_id_ == script.id;
- } else {
- // We might want to account columns here as well.
- if (!(script.line_offset <= this.line_ &&
- this.line_ < script.line_offset + %ScriptLineCount(script))) {
- return false;
- }
- if (this.type_ == Debug.ScriptBreakPointType.ScriptName) {
- return this.script_name_ == script.nameOrSourceURL();
- } else if (this.type_ == Debug.ScriptBreakPointType.ScriptRegExp) {
- return this.script_regexp_object_.test(script.nameOrSourceURL());
- } else {
- throw %make_error(kDebugger, "Unexpected breakpoint type " + this.type_);
- }
- }
-};
-
-
-// Set the script break point in a script.
-ScriptBreakPoint.prototype.set = function (script) {
- var column = this.column();
- var line = this.line();
- // If the column is undefined the break is on the line. To help locate the
- // first piece of breakable code on the line try to find the column on the
- // line which contains some source.
- if (IS_UNDEFINED(column)) {
- var source_line = %ScriptSourceLine(script, line || script.line_offset);
-
- // Allocate array for caching the columns where the actual source starts.
- if (!script.sourceColumnStart_) {
- script.sourceColumnStart_ = new GlobalArray(%ScriptLineCount(script));
- }
-
- // Fill cache if needed and get column where the actual source starts.
- if (IS_UNDEFINED(script.sourceColumnStart_[line])) {
- script.sourceColumnStart_[line] =
- source_line.match(sourceLineBeginningSkip)[0].length;
- }
- column = script.sourceColumnStart_[line];
- }
-
- // Convert the line and column into an absolute position within the script.
- var position = Debug.findScriptSourcePosition(script, this.line(), column);
-
- // If the position is not found in the script (the script might be shorter
- // than it used to be) just ignore it.
- if (IS_NULL(position)) return;
-
- // Create a break point object and set the break point.
- var break_point = MakeBreakPoint(position, this);
- var actual_position = %SetScriptBreakPoint(script, position,
- break_point);
- if (IS_UNDEFINED(actual_position)) {
- actual_position = position;
- }
- var actual_location = script.locationFromPosition(actual_position, true);
- break_point.actual_location = { line: actual_location.line,
- column: actual_location.column,
- script_id: script.id };
- this.break_points_.push(break_point);
- return break_point;
-};
-
-
-// Clear all the break points created from this script break point
-ScriptBreakPoint.prototype.clear = function () {
- var remaining_break_points = [];
- for (var i = 0; i < break_points.length; i++) {
- if (break_points[i].script_break_point() &&
- break_points[i].script_break_point() === this) {
- %ClearBreakPoint(break_points[i]);
- } else {
- remaining_break_points.push(break_points[i]);
- }
- }
- break_points = remaining_break_points;
- this.break_points_ = [];
-};
-
-
-Debug.setListener = function(listener, opt_data) {
- if (!IS_FUNCTION(listener) && !IS_UNDEFINED(listener) && !IS_NULL(listener)) {
- throw %make_type_error(kDebuggerType);
- }
- %SetDebugEventListener(listener, opt_data);
-};
-
-
// Returns a Script object. If the parameter is a function the return value
// is the script in which the function is defined. If the parameter is a string
// the return value is the script for which the script name has that string
@@ -475,246 +148,6 @@ Debug.findScriptSourcePosition = function(script, opt_line, opt_column) {
return location ? location.position : null;
};
-
-Debug.findBreakPoint = function(break_point_number, remove) {
- var break_point;
- for (var i = 0; i < break_points.length; i++) {
- if (break_points[i].number() == break_point_number) {
- break_point = break_points[i];
- // Remove the break point from the list if requested.
- if (remove) {
- break_points.splice(i, 1);
- }
- break;
- }
- }
- if (break_point) {
- return break_point;
- } else {
- return this.findScriptBreakPoint(break_point_number, remove);
- }
-};
-
-Debug.findBreakPointActualLocations = function(break_point_number) {
- for (var i = 0; i < script_break_points.length; i++) {
- if (script_break_points[i].number() == break_point_number) {
- return script_break_points[i].actual_locations();
- }
- }
- for (var i = 0; i < break_points.length; i++) {
- if (break_points[i].number() == break_point_number) {
- return [break_points[i].actual_location];
- }
- }
- return [];
-};
-
-Debug.setBreakPoint = function(func, opt_line, opt_column, opt_condition) {
- if (!IS_FUNCTION(func)) throw %make_type_error(kDebuggerType);
- // Break points in API functions are not supported.
- if (%FunctionIsAPIFunction(func)) {
- throw %make_error(kDebugger, 'Cannot set break point in native code.');
- }
- // Find source position.
- var source_position =
- this.findFunctionSourceLocation(func, opt_line, opt_column).position;
- // Find the script for the function.
- var script = %FunctionGetScript(func);
- // Break in builtin JavaScript code is not supported.
- if (script.type == Debug.ScriptType.Native) {
- throw %make_error(kDebugger, 'Cannot set break point in native code.');
- }
- // If the script for the function has a name convert this to a script break
- // point.
- if (script && script.id) {
- // Find line and column for the position in the script and set a script
- // break point from that.
- var location = script.locationFromPosition(source_position, false);
- return this.setScriptBreakPointById(script.id,
- location.line, location.column,
- opt_condition);
- } else {
- // Set a break point directly on the function.
- var break_point = MakeBreakPoint(source_position);
- var actual_position =
- %SetFunctionBreakPoint(func, source_position, break_point);
- var actual_location = script.locationFromPosition(actual_position, true);
- break_point.actual_location = { line: actual_location.line,
- column: actual_location.column,
- script_id: script.id };
- break_point.setCondition(opt_condition);
- return break_point.number();
- }
-};
-
-
-Debug.setBreakPointByScriptIdAndPosition = function(script_id, position,
- condition, enabled)
-{
- var break_point = MakeBreakPoint(position);
- break_point.setCondition(condition);
- if (!enabled) {
- break_point.disable();
- }
- var script = scriptById(script_id);
- if (script) {
- break_point.actual_position = %SetScriptBreakPoint(script, position, break_point);
- }
- return break_point;
-};
-
-
-Debug.enableBreakPoint = function(break_point_number) {
- var break_point = this.findBreakPoint(break_point_number, false);
- // Only enable if the breakpoint hasn't been deleted:
- if (break_point) {
- break_point.enable();
- }
-};
-
-
-Debug.disableBreakPoint = function(break_point_number) {
- var break_point = this.findBreakPoint(break_point_number, false);
- // Only enable if the breakpoint hasn't been deleted:
- if (break_point) {
- break_point.disable();
- }
-};
-
-
-Debug.changeBreakPointCondition = function(break_point_number, condition) {
- var break_point = this.findBreakPoint(break_point_number, false);
- break_point.setCondition(condition);
-};
-
-
-Debug.clearBreakPoint = function(break_point_number) {
- var break_point = this.findBreakPoint(break_point_number, true);
- if (break_point) {
- return %ClearBreakPoint(break_point);
- } else {
- break_point = this.findScriptBreakPoint(break_point_number, true);
- if (!break_point) throw %make_error(kDebugger, 'Invalid breakpoint');
- }
-};
-
-
-Debug.clearAllBreakPoints = function() {
- for (var i = 0; i < break_points.length; i++) {
- var break_point = break_points[i];
- %ClearBreakPoint(break_point);
- }
- break_points = [];
-};
-
-
-Debug.disableAllBreakPoints = function() {
- // Disable all user defined breakpoints:
- for (var i = 1; i < next_break_point_number; i++) {
- Debug.disableBreakPoint(i);
- }
- // Disable all exception breakpoints:
- %ChangeBreakOnException(Debug.ExceptionBreak.Caught, false);
- %ChangeBreakOnException(Debug.ExceptionBreak.Uncaught, false);
-};
-
-
-Debug.findScriptBreakPoint = function(break_point_number, remove) {
- var script_break_point;
- for (var i = 0; i < script_break_points.length; i++) {
- if (script_break_points[i].number() == break_point_number) {
- script_break_point = script_break_points[i];
- // Remove the break point from the list if requested.
- if (remove) {
- script_break_point.clear();
- script_break_points.splice(i,1);
- }
- break;
- }
- }
- return script_break_point;
-};
-
-
-// Sets a breakpoint in a script identified through id or name at the
-// specified source line and column within that line.
-Debug.setScriptBreakPoint = function(type, script_id_or_name,
- opt_line, opt_column, opt_condition,
- opt_groupId) {
- // Create script break point object.
- var script_break_point =
- new ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
- opt_groupId);
-
- // Assign number to the new script break point and add it.
- script_break_point.number_ = next_break_point_number++;
- script_break_point.setCondition(opt_condition);
- script_break_points.push(script_break_point);
-
- // Run through all scripts to see if this script break point matches any
- // loaded scripts.
- var scripts = this.scripts();
- for (var i = 0; i < scripts.length; i++) {
- if (script_break_point.matchesScript(scripts[i])) {
- script_break_point.set(scripts[i]);
- }
- }
-
- return script_break_point.number();
-};
-
-
-Debug.setScriptBreakPointById = function(script_id,
- opt_line, opt_column,
- opt_condition, opt_groupId) {
- return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId,
- script_id, opt_line, opt_column,
- opt_condition, opt_groupId);
-};
-
-
-Debug.setScriptBreakPointByName = function(script_name,
- opt_line, opt_column,
- opt_condition, opt_groupId) {
- return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptName,
- script_name, opt_line, opt_column,
- opt_condition, opt_groupId);
-};
-
-
-Debug.setScriptBreakPointByRegExp = function(script_regexp,
- opt_line, opt_column,
- opt_condition, opt_groupId) {
- return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptRegExp,
- script_regexp, opt_line, opt_column,
- opt_condition, opt_groupId);
-};
-
-
-Debug.enableScriptBreakPoint = function(break_point_number) {
- var script_break_point = this.findScriptBreakPoint(break_point_number, false);
- script_break_point.enable();
-};
-
-
-Debug.disableScriptBreakPoint = function(break_point_number) {
- var script_break_point = this.findScriptBreakPoint(break_point_number, false);
- script_break_point.disable();
-};
-
-
-Debug.changeScriptBreakPointCondition = function(
- break_point_number, condition) {
- var script_break_point = this.findScriptBreakPoint(break_point_number, false);
- script_break_point.setCondition(condition);
-};
-
-
-Debug.scriptBreakPoints = function() {
- return script_break_points;
-};
-
-
Debug.clearStepping = function() {
%ClearStepping();
};
@@ -743,28 +176,6 @@ Debug.isBreakOnUncaughtException = function() {
return !!%IsBreakOnException(Debug.ExceptionBreak.Uncaught);
};
-Debug.showBreakPoints = function(f, full) {
- if (!IS_FUNCTION(f)) throw %make_error(kDebuggerType);
- var source = full ? this.scriptSource(f) : this.source(f);
- var offset = full ? 0 : this.sourcePosition(f);
- var locations = %GetBreakLocations(f);
- if (!locations) return source;
- locations.sort(function(x, y) { return x - y; });
- var result = "";
- var prev_pos = 0;
- var pos;
- for (var i = 0; i < locations.length; i++) {
- pos = locations[i] - offset;
- result += source.slice(prev_pos, pos);
- result += "[B" + i + "]";
- prev_pos = pos;
- }
- pos = source.length;
- result += source.substring(prev_pos, pos);
- return result;
-};
-
-
// Get all the scripts currently loaded. Locating all the scripts is based on
// scanning the heap.
Debug.scripts = function() {
@@ -837,46 +248,6 @@ ExecutionState.prototype.selectedFrame = function() {
return this.selected_frame;
};
-function MakeBreakEvent(break_id, break_points_hit) {
- return new BreakEvent(break_id, break_points_hit);
-}
-
-
-function BreakEvent(break_id, break_points_hit) {
- this.frame_ = new FrameMirror(break_id, 0);
- this.break_points_hit_ = break_points_hit;
-}
-
-
-BreakEvent.prototype.eventType = function() {
- return Debug.DebugEvent.Break;
-};
-
-
-BreakEvent.prototype.func = function() {
- return this.frame_.func();
-};
-
-
-BreakEvent.prototype.sourceLine = function() {
- return this.frame_.sourceLine();
-};
-
-
-BreakEvent.prototype.sourceColumn = function() {
- return this.frame_.sourceColumn();
-};
-
-
-BreakEvent.prototype.sourceLineText = function() {
- return this.frame_.sourceLineText();
-};
-
-
-BreakEvent.prototype.breakPointsHit = function() {
- return this.break_points_hit_;
-};
-
function MakeExceptionEvent(break_id, exception, uncaught, promise) {
return new ExceptionEvent(break_id, exception, uncaught, promise);
@@ -994,19 +365,15 @@ AsyncTaskEvent.prototype.id = function() {
utils.InstallConstants(global, [
"Debug", Debug,
- "BreakEvent", BreakEvent,
"CompileEvent", CompileEvent,
- "BreakPoint", BreakPoint,
]);
// Functions needed by the debugger runtime.
utils.InstallConstants(utils, [
"MakeExecutionState", MakeExecutionState,
"MakeExceptionEvent", MakeExceptionEvent,
- "MakeBreakEvent", MakeBreakEvent,
"MakeCompileEvent", MakeCompileEvent,
"MakeAsyncTaskEvent", MakeAsyncTaskEvent,
- "IsBreakPointTriggered", IsBreakPointTriggered,
]);
})
diff --git a/deps/v8/src/debug/interface-types.h b/deps/v8/src/debug/interface-types.h
index 4dd8352695..7f5cd5f805 100644
--- a/deps/v8/src/debug/interface-types.h
+++ b/deps/v8/src/debug/interface-types.h
@@ -176,4 +176,34 @@ typedef int BreakpointId;
} // namespace debug
} // namespace v8
+// TODO(yangguo): this is legacy left over from removing v8-debug.h, and still
+// used in cctests. Let's get rid of these soon.
+namespace v8 {
+enum DebugEvent {
+ Break = 1,
+ Exception = 2,
+ AfterCompile = 3,
+ CompileError = 4,
+ AsyncTaskEvent = 5,
+};
+
+class Debug {
+ public:
+ class EventDetails {
+ public:
+ virtual DebugEvent GetEvent() const = 0;
+ virtual Local<Object> GetExecutionState() const = 0;
+ virtual Local<Object> GetEventData() const = 0;
+ virtual Local<Context> GetEventContext() const = 0;
+ virtual Local<Value> GetCallbackData() const = 0;
+
+ virtual Isolate* GetIsolate() const = 0;
+
+ virtual ~EventDetails() {}
+ };
+
+ typedef void (*EventCallback)(const EventDetails& event_details);
+};
+} // namespace v8
+
#endif // V8_DEBUG_INTERFACE_TYPES_H_
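
Assuming the legacy declarations above are in scope, a cctest-style listener matching the EventCallback typedef would look roughly like this (sketch only; the function name and body are illustrative):

    // Sketch: a listener for the legacy event callback typedef.
    void OnLegacyDebugEvent(const v8::Debug::EventDetails& details) {
      if (details.GetEvent() == v8::Break) {
        v8::Local<v8::Object> exec_state = details.GetExecutionState();
        // Inspect exec_state / details.GetEventData() as the old API allowed.
        (void)exec_state;
      }
    }

    // Registered wherever a v8::Debug::EventCallback is expected:
    // v8::Debug::EventCallback callback = &OnLegacyDebugEvent;
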
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index a2b22d58d4..4f53f8554f 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -743,11 +743,12 @@ class FeedbackVectorFixer {
for (int i = 0; i < function_instances->length(); i++) {
Handle<JSFunction> fun(JSFunction::cast(function_instances->get(i)));
- Handle<Cell> new_cell = isolate->factory()->NewManyClosuresCell(
- isolate->factory()->undefined_value());
- fun->set_feedback_vector_cell(*new_cell);
+ Handle<FeedbackCell> feedback_cell =
+ isolate->factory()->NewManyClosuresCell(
+ isolate->factory()->undefined_value());
+ fun->set_feedback_cell(*feedback_cell);
// Only create feedback vectors if we already have the metadata.
- if (shared_info->is_compiled()) JSFunction::EnsureLiterals(fun);
+ if (shared_info->is_compiled()) JSFunction::EnsureFeedbackVector(fun);
}
}
diff --git a/deps/v8/src/debug/liveedit.h b/deps/v8/src/debug/liveedit.h
index 2335b94f10..db599b77e6 100644
--- a/deps/v8/src/debug/liveedit.h
+++ b/deps/v8/src/debug/liveedit.h
@@ -321,4 +321,4 @@ class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
} // namespace internal
} // namespace v8
-#endif /* V8_DEBUG_LIVEEDIT_H_ */
+#endif // V8_DEBUG_LIVEEDIT_H_
diff --git a/deps/v8/src/debug/mips/OWNERS b/deps/v8/src/debug/mips/OWNERS
index 978563cab5..4ce9d7f91d 100644
--- a/deps/v8/src/debug/mips/OWNERS
+++ b/deps/v8/src/debug/mips/OWNERS
@@ -1,2 +1,3 @@
ivica.bogosavljevic@mips.com
Miran.Karic@mips.com
+sreten.kovacevic@mips.com
\ No newline at end of file
diff --git a/deps/v8/src/debug/mips64/OWNERS b/deps/v8/src/debug/mips64/OWNERS
index 978563cab5..4ce9d7f91d 100644
--- a/deps/v8/src/debug/mips64/OWNERS
+++ b/deps/v8/src/debug/mips64/OWNERS
@@ -1,2 +1,3 @@
ivica.bogosavljevic@mips.com
Miran.Karic@mips.com
+sreten.kovacevic@mips.com
\ No newline at end of file
diff --git a/deps/v8/src/debug/mirrors.js b/deps/v8/src/debug/mirrors.js
index 15d5e64258..85052b3cae 100644
--- a/deps/v8/src/debug/mirrors.js
+++ b/deps/v8/src/debug/mirrors.js
@@ -54,11 +54,11 @@ macro IS_DATE(arg)
endmacro
macro IS_ERROR(arg)
-(%_ClassOf(arg) === 'Error')
+(%IsJSError(arg))
endmacro
macro IS_GENERATOR(arg)
-(%_ClassOf(arg) === 'Generator')
+(%IsJSGeneratorObject(arg))
endmacro
macro IS_MAP(arg)
@@ -66,11 +66,11 @@ macro IS_MAP(arg)
endmacro
macro IS_MAP_ITERATOR(arg)
-(%_ClassOf(arg) === 'Map Iterator')
+(%IsJSMapIterator(arg))
endmacro
macro IS_SCRIPT(arg)
-(%_ClassOf(arg) === 'Script')
+(%IsScriptWrapper(arg))
endmacro
macro IS_SET(arg)
@@ -78,7 +78,7 @@ macro IS_SET(arg)
endmacro
macro IS_SET_ITERATOR(arg)
-(%_ClassOf(arg) === 'Set Iterator')
+(%IsJSSetIterator(arg))
endmacro
// Must match PropertyFilter in property-details.h
@@ -638,7 +638,7 @@ inherits(ObjectMirror, ValueMirror);
ObjectMirror.prototype.className = function() {
- return %_ClassOf(this.value_);
+ return %ClassOf(this.value_);
};
diff --git a/deps/v8/src/deoptimize-reason.h b/deps/v8/src/deoptimize-reason.h
index 3fabf555be..6881e114b3 100644
--- a/deps/v8/src/deoptimize-reason.h
+++ b/deps/v8/src/deoptimize-reason.h
@@ -41,6 +41,7 @@ namespace internal {
V(NotAJavaScriptObject, "not a JavaScript object") \
V(NotANumberOrOddball, "not a Number or Oddball") \
V(NotASmi, "not a Smi") \
+ V(NotAString, "not a String") \
V(NotASymbol, "not a Symbol") \
V(OutOfBounds, "out of bounds") \
V(Overflow, "overflow") \
@@ -53,7 +54,8 @@ namespace internal {
V(WrongInstanceType, "wrong instance type") \
V(WrongMap, "wrong map") \
V(WrongName, "wrong name") \
- V(WrongValue, "wrong value")
+ V(WrongValue, "wrong value") \
+ V(NoInitialElement, "no initial element")
enum class DeoptimizeReason : uint8_t {
#define DEOPTIMIZE_REASON(Name, message) k##Name,
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index 362bd12cb6..644bd29796 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -276,6 +276,7 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintF(scope.file(), "[deoptimize all code in all contexts]\n");
}
+ isolate->AbortConcurrentOptimization(BlockingBehavior::kBlock);
DisallowHeapAllocation no_allocation;
// For all contexts, mark all code, then deoptimize.
Object* context = isolate->heap()->native_contexts_list();
@@ -545,10 +546,8 @@ int LookupCatchHandler(TranslatedFrame* translated_frame, int* data_out) {
switch (translated_frame->kind()) {
case TranslatedFrame::kInterpretedFunction: {
int bytecode_offset = translated_frame->node_id().ToInt();
- BytecodeArray* bytecode =
- translated_frame->raw_shared_info()->bytecode_array();
- HandlerTable* table = HandlerTable::cast(bytecode->handler_table());
- return table->LookupRange(bytecode_offset, data_out, nullptr);
+ HandlerTable table(translated_frame->raw_shared_info()->bytecode_array());
+ return table.LookupRange(bytecode_offset, data_out, nullptr);
}
default:
break;
@@ -956,7 +955,8 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
(!is_topmost || (bailout_type_ == LAZY)) && !goto_catch_handler
? builtins->builtin(Builtins::kInterpreterEnterBytecodeAdvance)
: builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
- output_frame->SetPc(reinterpret_cast<intptr_t>(dispatch_builtin->entry()));
+ output_frame->SetPc(
+ reinterpret_cast<intptr_t>(dispatch_builtin->InstructionStart()));
// Update constant pool.
if (FLAG_enable_embedded_constant_pool) {
@@ -980,7 +980,7 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
// Set the continuation for the topmost frame.
Code* continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
output_frame->SetContinuation(
- reinterpret_cast<intptr_t>(continuation->entry()));
+ reinterpret_cast<intptr_t>(continuation->InstructionStart()));
}
}
@@ -1114,7 +1114,7 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
Code* adaptor_trampoline =
builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
intptr_t pc_value = reinterpret_cast<intptr_t>(
- adaptor_trampoline->instruction_start() +
+ adaptor_trampoline->InstructionStart() +
isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
output_frame->SetPc(pc_value);
if (FLAG_enable_embedded_constant_pool) {
@@ -1303,7 +1303,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
// Compute this frame's PC.
DCHECK(bailout_id.IsValidForConstructStub());
- Address start = construct_stub->instruction_start();
+ Address start = construct_stub->InstructionStart();
int pc_offset =
bailout_id == BailoutId::ConstructStubCreate()
? isolate_->heap()->construct_stub_create_deopt_pc_offset()->value()
@@ -1338,7 +1338,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
DCHECK_EQ(LAZY, bailout_type_);
Code* continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
output_frame->SetContinuation(
- reinterpret_cast<intptr_t>(continuation->entry()));
+ reinterpret_cast<intptr_t>(continuation->InstructionStart()));
}
}
@@ -1688,12 +1688,12 @@ void Deoptimizer::DoComputeBuiltinContinuation(
: isolate()->builtins()->builtin(
Builtins::kContinueToCodeStubBuiltin));
output_frame->SetPc(
- reinterpret_cast<intptr_t>(continue_to_builtin->instruction_start()));
+ reinterpret_cast<intptr_t>(continue_to_builtin->InstructionStart()));
Code* continuation =
isolate()->builtins()->builtin(Builtins::kNotifyDeoptimized);
output_frame->SetContinuation(
- reinterpret_cast<intptr_t>(continuation->entry()));
+ reinterpret_cast<intptr_t>(continuation->InstructionStart()));
}
void Deoptimizer::MaterializeHeapObjects() {
@@ -1832,14 +1832,13 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
GenerateDeoptimizationEntries(&masm, kMaxNumberOfEntries, type);
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
+ DCHECK(!RelocInfo::RequiresRelocation(desc));
// Allocate the code as immovable since the entry addresses will be used
// directly and there is no support for relocating them.
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::STUB, Handle<Object>(), Builtins::kNoBuiltinId,
- MaybeHandle<HandlerTable>(), MaybeHandle<ByteArray>(),
- MaybeHandle<DeoptimizationData>(), kImmovable);
+ MaybeHandle<ByteArray>(), MaybeHandle<DeoptimizationData>(), kImmovable);
CHECK(Heap::IsImmovable(*code));
CHECK_NULL(data->deopt_entry_code_[type]);
@@ -2287,7 +2286,7 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo(TranslatedState* state,
Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code* code, Address pc) {
- CHECK(code->instruction_start() <= pc && pc <= code->instruction_end());
+ CHECK(code->InstructionStart() <= pc && pc <= code->InstructionEnd());
SourcePosition last_position = SourcePosition::Unknown();
DeoptimizeReason last_reason = DeoptimizeReason::kUnknown;
int last_deopt_id = kNoDeoptimizationId;
@@ -3906,7 +3905,7 @@ bool TranslatedState::DoUpdateFeedback() {
if (!feedback_vector_handle_.is_null()) {
CHECK(!feedback_slot_.IsInvalid());
isolate()->CountUsage(v8::Isolate::kDeoptimizerDisableSpeculation);
- CallICNexus nexus(feedback_vector_handle_, feedback_slot_);
+ FeedbackNexus nexus(feedback_vector_handle_, feedback_slot_);
nexus.SetSpeculationMode(SpeculationMode::kDisallowSpeculation);
return true;
}
diff --git a/deps/v8/src/disasm.h b/deps/v8/src/disasm.h
index 263aa317d5..00e0e29546 100644
--- a/deps/v8/src/disasm.h
+++ b/deps/v8/src/disasm.h
@@ -41,6 +41,13 @@ class Disassembler {
// Returns the length of the disassembled machine instruction in bytes.
int InstructionDecode(v8::internal::Vector<char> buffer, byte* instruction);
+ // Disassemblers on ia32/x64 need a separate method for testing, as the
+ // instruction decode method above continues on unimplemented opcodes and so
+ // does not test the disassemblers. Basic functionality of the method remains
+ // the same.
+ int InstructionDecodeForTesting(v8::internal::Vector<char> buffer,
+ byte* instruction);
+
// Returns -1 if instruction does not mark the beginning of a constant pool,
// or the number of entries in the constant pool beginning here.
int ConstantPoolSizeAt(byte* instruction);
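
A hypothetical test-side call to the new method (sketch only: converter and instruction are assumed to exist in the test, and the constructor call is illustrative rather than copied from a real cctest):

    v8::internal::EmbeddedVector<char, 128> buffer;
    disasm::Disassembler d(converter);
    int length = d.InstructionDecodeForTesting(buffer, instruction);
    // `length` is the decoded instruction size in bytes, as for
    // InstructionDecode(); the testing variant does not skip over
    // unimplemented opcodes.
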
@@ -48,6 +55,7 @@ class Disassembler {
// Write disassembly into specified file 'f' using specified NameConverter
// (see constructor).
static void Disassemble(FILE* f, byte* begin, byte* end);
+
private:
const NameConverter& converter_;
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index a26517b432..86cce891ec 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -256,7 +256,9 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
// Print all the reloc info for this instruction which are not comments.
for (size_t i = 0; i < pcs.size(); i++) {
// Put together the reloc info
- RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], converter.code());
+ Code* host = converter.code();
+ RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], host);
+ relocinfo.set_constant_pool(host ? host->constant_pool() : nullptr);
bool first_reloc_info = (i == 0);
PrintRelocInfo(&out, isolate, ref_encoder, os, &relocinfo,
@@ -267,7 +269,7 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
// already, check if we can find some RelocInfo for the target address in
// the constant pool.
if (pcs.empty() && converter.code() != nullptr) {
- RelocInfo dummy_rinfo(prev_pc, RelocInfo::NONE32, 0, nullptr);
+ RelocInfo dummy_rinfo(prev_pc, RelocInfo::NONE, 0, nullptr);
if (dummy_rinfo.IsInConstantPool()) {
byte* constant_pool_entry_address =
dummy_rinfo.constant_pool_entry_address();
diff --git a/deps/v8/src/eh-frame.h b/deps/v8/src/eh-frame.h
index bd064eb9cd..68a8c031c0 100644
--- a/deps/v8/src/eh-frame.h
+++ b/deps/v8/src/eh-frame.h
@@ -298,4 +298,4 @@ class EhFrameDisassembler final {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_EH_FRAME_H_
diff --git a/deps/v8/src/elements-kind.cc b/deps/v8/src/elements-kind.cc
index 0018d59c63..0905677c3c 100644
--- a/deps/v8/src/elements-kind.cc
+++ b/deps/v8/src/elements-kind.cc
@@ -30,6 +30,8 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) {
case PACKED_DOUBLE_ELEMENTS:
case HOLEY_DOUBLE_ELEMENTS:
case FLOAT64_ELEMENTS:
+ case BIGINT64_ELEMENTS:
+ case BIGUINT64_ELEMENTS:
return 3;
case PACKED_SMI_ELEMENTS:
case PACKED_ELEMENTS:
@@ -153,6 +155,92 @@ bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
return false;
}
+bool UnionElementsKindUptoSize(ElementsKind* a_out, ElementsKind b) {
+ // Assert that the union of two ElementKinds can be computed via std::max.
+ static_assert(PACKED_SMI_ELEMENTS < HOLEY_SMI_ELEMENTS,
+ "ElementsKind union not computable via std::max.");
+ static_assert(HOLEY_SMI_ELEMENTS < PACKED_ELEMENTS,
+ "ElementsKind union not computable via std::max.");
+ static_assert(PACKED_ELEMENTS < HOLEY_ELEMENTS,
+ "ElementsKind union not computable via std::max.");
+ static_assert(PACKED_DOUBLE_ELEMENTS < HOLEY_DOUBLE_ELEMENTS,
+ "ElementsKind union not computable via std::max.");
+ ElementsKind a = *a_out;
+ switch (a) {
+ case PACKED_SMI_ELEMENTS:
+ switch (b) {
+ case PACKED_SMI_ELEMENTS:
+ case HOLEY_SMI_ELEMENTS:
+ case PACKED_ELEMENTS:
+ case HOLEY_ELEMENTS:
+ *a_out = b;
+ return true;
+ default:
+ return false;
+ }
+ case HOLEY_SMI_ELEMENTS:
+ switch (b) {
+ case PACKED_SMI_ELEMENTS:
+ case HOLEY_SMI_ELEMENTS:
+ *a_out = HOLEY_SMI_ELEMENTS;
+ return true;
+ case PACKED_ELEMENTS:
+ case HOLEY_ELEMENTS:
+ *a_out = HOLEY_ELEMENTS;
+ return true;
+ default:
+ return false;
+ }
+ case PACKED_ELEMENTS:
+ switch (b) {
+ case PACKED_SMI_ELEMENTS:
+ case PACKED_ELEMENTS:
+ *a_out = PACKED_ELEMENTS;
+ return true;
+ case HOLEY_SMI_ELEMENTS:
+ case HOLEY_ELEMENTS:
+ *a_out = HOLEY_ELEMENTS;
+ return true;
+ default:
+ return false;
+ }
+ case HOLEY_ELEMENTS:
+ switch (b) {
+ case PACKED_SMI_ELEMENTS:
+ case HOLEY_SMI_ELEMENTS:
+ case PACKED_ELEMENTS:
+ case HOLEY_ELEMENTS:
+ *a_out = HOLEY_ELEMENTS;
+ return true;
+ default:
+ return false;
+ }
+ break;
+ case PACKED_DOUBLE_ELEMENTS:
+ switch (b) {
+ case PACKED_DOUBLE_ELEMENTS:
+ case HOLEY_DOUBLE_ELEMENTS:
+ *a_out = b;
+ return true;
+ default:
+ return false;
+ }
+ case HOLEY_DOUBLE_ELEMENTS:
+ switch (b) {
+ case PACKED_DOUBLE_ELEMENTS:
+ case HOLEY_DOUBLE_ELEMENTS:
+ *a_out = HOLEY_DOUBLE_ELEMENTS;
+ return true;
+ default:
+ return false;
+ }
+
+ break;
+ default:
+ break;
+ }
+ return false;
+}
} // namespace internal
} // namespace v8
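
The new UnionElementsKindUptoSize helper only unions kinds that share an element size; crossing the Smi/Object vs. double boundary fails. A minimal sketch (not part of the patch) of folding it over a list of kinds — the seed value and the NO_ELEMENTS sentinel are illustrative choices, only the helper itself comes from this change:

#include <vector>
#include "src/elements-kind.h"  // assumed include for the declarations above

namespace v8 {
namespace internal {

// Folds several element kinds into one kind wide enough to hold all of them.
// Returns NO_ELEMENTS (a sentinel chosen for this sketch) when the union
// would have to cross an element-size boundary, e.g. HOLEY_ELEMENTS vs. a
// double kind.
ElementsKind UnionOfKindsOrSentinel(const std::vector<ElementsKind>& kinds) {
  if (kinds.empty()) return PACKED_SMI_ELEMENTS;
  ElementsKind result = kinds[0];
  for (ElementsKind kind : kinds) {
    if (!UnionElementsKindUptoSize(&result, kind)) return NO_ELEMENTS;
  }
  return result;
}

}  // namespace internal
}  // namespace v8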
diff --git a/deps/v8/src/elements-kind.h b/deps/v8/src/elements-kind.h
index b03f9340f3..b00966ef10 100644
--- a/deps/v8/src/elements-kind.h
+++ b/deps/v8/src/elements-kind.h
@@ -49,17 +49,19 @@ enum ElementsKind {
FLOAT32_ELEMENTS,
FLOAT64_ELEMENTS,
UINT8_CLAMPED_ELEMENTS,
+ BIGUINT64_ELEMENTS,
+ BIGINT64_ELEMENTS,
// Sentinel ElementsKind for objects with no elements.
NO_ELEMENTS,
// Derived constants from ElementsKind.
FIRST_ELEMENTS_KIND = PACKED_SMI_ELEMENTS,
- LAST_ELEMENTS_KIND = UINT8_CLAMPED_ELEMENTS,
+ LAST_ELEMENTS_KIND = BIGINT64_ELEMENTS,
FIRST_FAST_ELEMENTS_KIND = PACKED_SMI_ELEMENTS,
LAST_FAST_ELEMENTS_KIND = HOLEY_DOUBLE_ELEMENTS,
FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND = UINT8_ELEMENTS,
- LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND = UINT8_CLAMPED_ELEMENTS,
+ LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND = BIGINT64_ELEMENTS,
TERMINAL_FAST_ELEMENTS_KIND = HOLEY_ELEMENTS
};
@@ -229,6 +231,8 @@ inline bool UnionElementsKindUptoPackedness(ElementsKind* a_out,
return false;
}
+bool UnionElementsKindUptoSize(ElementsKind* a_out, ElementsKind b);
+
inline ElementsKind FastSmiToObjectElementsKind(ElementsKind from_kind) {
DCHECK(IsSmiElementsKind(from_kind));
return (from_kind == PACKED_SMI_ELEMENTS) ? PACKED_ELEMENTS : HOLEY_ELEMENTS;
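
Because LAST_ELEMENTS_KIND and LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND now end at BIGINT64_ELEMENTS, any table or loop keyed on the typed-array range also has to cover the two BigInt kinds. A small illustrative sketch (not part of the patch):

#include "src/elements-kind.h"  // assumed include for the enum above

// Counting the fixed typed-array kinds now includes BIGUINT64_ELEMENTS and
// BIGINT64_ELEMENTS; tables sized from these bounds grow by two entries.
inline int CountFixedTypedArrayKinds() {
  int count = 0;
  for (int k = FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND;
       k <= LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND; ++k) {
    ++count;
  }
  return count;
}
static_assert(LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND == BIGINT64_ELEMENTS,
              "BigInt kinds are part of the fixed typed-array range");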
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 499af83078..471798dd79 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -12,7 +12,6 @@
#include "src/messages.h"
#include "src/objects-inl.h"
#include "src/utils.h"
-#include "src/zone/zone.h"
// Each concrete ElementsAccessor can handle exactly one ElementsKind,
// several abstract ElementsAccessor classes are used to allow sharing
@@ -39,6 +38,8 @@
// - FixedFloat32ElementsAccessor
// - FixedFloat64ElementsAccessor
// - FixedUint8ClampedElementsAccessor
+// - FixedBigUint64ElementsAccessor
+// - FixedBigInt64ElementsAccessor
// - DictionaryElementsAccessor
// - SloppyArgumentsElementsAccessor
// - FastSloppyArgumentsElementsAccessor
@@ -90,7 +91,9 @@ enum Where { AT_START, AT_END };
V(FixedFloat32ElementsAccessor, FLOAT32_ELEMENTS, FixedFloat32Array) \
V(FixedFloat64ElementsAccessor, FLOAT64_ELEMENTS, FixedFloat64Array) \
V(FixedUint8ClampedElementsAccessor, UINT8_CLAMPED_ELEMENTS, \
- FixedUint8ClampedArray)
+ FixedUint8ClampedArray) \
+ V(FixedBigUint64ElementsAccessor, BIGUINT64_ELEMENTS, FixedBigUint64Array) \
+ V(FixedBigInt64ElementsAccessor, BIGINT64_ELEMENTS, FixedBigInt64Array)
template<ElementsKind Kind> class ElementsKindTraits {
public:
@@ -718,22 +721,11 @@ class ElementsAccessorBase : public InternalElementsAccessor {
return Subclass::SliceImpl(receiver, start, end);
}
- Handle<JSObject> Slice(Handle<JSObject> receiver, uint32_t start,
- uint32_t end, Handle<JSObject> result) final {
- return Subclass::SliceWithResultImpl(receiver, start, end, result);
- }
-
static Handle<JSObject> SliceImpl(Handle<JSObject> receiver, uint32_t start,
uint32_t end) {
UNREACHABLE();
}
- static Handle<JSObject> SliceWithResultImpl(Handle<JSObject> receiver,
- uint32_t start, uint32_t end,
- Handle<JSObject> result) {
- UNREACHABLE();
- }
-
Handle<JSArray> Splice(Handle<JSArray> receiver, uint32_t start,
uint32_t delete_count, Arguments* args,
uint32_t add_count) final {
@@ -1035,13 +1027,25 @@ class ElementsAccessorBase : public InternalElementsAccessor {
kPackedSizeNotKnown, size);
}
- Object* CopyElements(Handle<JSReceiver> source, Handle<JSObject> destination,
+ void CopyTypedArrayElementsSlice(JSTypedArray* source,
+ JSTypedArray* destination, size_t start,
+ size_t end) {
+ Subclass::CopyTypedArrayElementsSliceImpl(source, destination, start, end);
+ }
+
+ static void CopyTypedArrayElementsSliceImpl(JSTypedArray* source,
+ JSTypedArray* destination,
+ size_t start, size_t end) {
+ UNREACHABLE();
+ }
+
+ Object* CopyElements(Handle<Object> source, Handle<JSObject> destination,
size_t length, uint32_t offset) final {
return Subclass::CopyElementsHandleImpl(source, destination, length,
offset);
}
- static Object* CopyElementsHandleImpl(Handle<JSReceiver> source,
+ static Object* CopyElementsHandleImpl(Handle<Object> source,
Handle<JSObject> destination,
size_t length, uint32_t offset) {
UNREACHABLE();
@@ -2995,15 +2999,9 @@ class TypedElementsAccessor
uint32_t end) {
Handle<JSTypedArray> array = Handle<JSTypedArray>::cast(receiver);
DCHECK(!array->WasNeutered());
- DCHECK(obj_value->IsNumber());
+ DCHECK(obj_value->IsNumeric());
- ctype value;
- if (obj_value->IsSmi()) {
- value = BackingStore::from(Smi::ToInt(*obj_value));
- } else {
- DCHECK(obj_value->IsHeapNumber());
- value = BackingStore::from(HeapNumber::cast(*obj_value)->value());
- }
+ ctype value = BackingStore::FromHandle(obj_value);
// Ensure indexes are within array bounds
DCHECK_LE(0, start);
@@ -3034,41 +3032,49 @@ class TypedElementsAccessor
length > static_cast<uint32_t>(elements->length())) {
return Just(true);
}
- if (!value->IsNumber()) return Just(false);
-
- double search_value = value->Number();
-
- if (!std::isfinite(search_value)) {
- // Integral types cannot represent +Inf or NaN
- if (AccessorClass::kind() < FLOAT32_ELEMENTS ||
- AccessorClass::kind() > FLOAT64_ELEMENTS) {
- return Just(false);
- }
- } else if (search_value < std::numeric_limits<ctype>::lowest() ||
- search_value > std::numeric_limits<ctype>::max()) {
- // Return false if value can't be represented in this space
- return Just(false);
- }
-
+ ctype typed_search_value;
// Prototype has no elements, and not searching for the hole --- limit
// search to backing store length.
if (static_cast<uint32_t>(elements->length()) < length) {
length = elements->length();
}
- if (!std::isnan(search_value)) {
- for (uint32_t k = start_from; k < length; ++k) {
- double element_k = elements->get_scalar(k);
- if (element_k == search_value) return Just(true);
- }
- return Just(false);
+ if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) {
+ if (!value->IsBigInt()) return Just(false);
+ bool lossless;
+ typed_search_value = BackingStore::FromHandle(value, &lossless);
+ if (!lossless) return Just(false);
} else {
- for (uint32_t k = start_from; k < length; ++k) {
- double element_k = elements->get_scalar(k);
- if (std::isnan(element_k)) return Just(true);
+ if (!value->IsNumber()) return Just(false);
+ double search_value = value->Number();
+ if (!std::isfinite(search_value)) {
+ // Integral types cannot represent +Inf or NaN.
+ if (Kind < FLOAT32_ELEMENTS || Kind > FLOAT64_ELEMENTS) {
+ return Just(false);
+ }
+ if (std::isnan(search_value)) {
+ for (uint32_t k = start_from; k < length; ++k) {
+ double element_k = elements->get_scalar(k);
+ if (std::isnan(element_k)) return Just(true);
+ }
+ return Just(false);
+ }
+ } else if (search_value < std::numeric_limits<ctype>::lowest() ||
+ search_value > std::numeric_limits<ctype>::max()) {
+ // Return false if value can't be represented in this space.
+ return Just(false);
}
- return Just(false);
+ typed_search_value = static_cast<ctype>(search_value);
+ if (static_cast<double>(typed_search_value) != search_value) {
+ return Just(false); // Loss of precision.
+ }
+ }
+
+ for (uint32_t k = start_from; k < length; ++k) {
+ ctype element_k = elements->get_scalar(k);
+ if (element_k == typed_search_value) return Just(true);
}
+ return Just(false);
}
static Maybe<int64_t> IndexOfValueImpl(Isolate* isolate,
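
The IncludesValueImpl rewrite above splits the search-value handling: BigInt-backed kinds only accept BigInts that convert losslessly, every other kind only accepts Numbers that survive a round trip through the element type. A standalone sketch of that Number-path check for integral backing stores (float kinds and the BigInt branch take the separate paths shown in the hunk; the function name is illustrative):

#include <cmath>
#include <limits>

// Returns true and fills *typed_out when `search_value` is exactly
// representable in the integral element type `ctype`; otherwise the search
// can bail out early, mirroring the Just(false) / Just(-1) returns above.
template <typename ctype>
bool RepresentableInBackingStore(double search_value, ctype* typed_out) {
  if (!std::isfinite(search_value)) return false;  // +/-Inf and NaN
  if (search_value <
          static_cast<double>(std::numeric_limits<ctype>::lowest()) ||
      search_value > static_cast<double>(std::numeric_limits<ctype>::max())) {
    return false;  // out of range for this element type
  }
  ctype typed = static_cast<ctype>(search_value);
  if (static_cast<double>(typed) != search_value) {
    return false;  // loss of precision
  }
  *typed_out = typed;
  return true;
}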
@@ -3080,20 +3086,33 @@ class TypedElementsAccessor
if (WasNeutered(*receiver)) return Just<int64_t>(-1);
BackingStore* elements = BackingStore::cast(receiver->elements());
- if (!value->IsNumber()) return Just<int64_t>(-1);
-
- double search_value = value->Number();
+ ctype typed_search_value;
- if (!std::isfinite(search_value)) {
- // Integral types cannot represent +Inf or NaN.
- if (AccessorClass::kind() < FLOAT32_ELEMENTS ||
- AccessorClass::kind() > FLOAT64_ELEMENTS) {
+ if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) {
+ if (!value->IsBigInt()) return Just<int64_t>(-1);
+ bool lossless;
+ typed_search_value = BackingStore::FromHandle(value, &lossless);
+ if (!lossless) return Just<int64_t>(-1);
+ } else {
+ if (!value->IsNumber()) return Just<int64_t>(-1);
+ double search_value = value->Number();
+ if (!std::isfinite(search_value)) {
+ // Integral types cannot represent +Inf or NaN.
+ if (Kind < FLOAT32_ELEMENTS || Kind > FLOAT64_ELEMENTS) {
+ return Just<int64_t>(-1);
+ }
+ if (std::isnan(search_value)) {
+ return Just<int64_t>(-1);
+ }
+ } else if (search_value < std::numeric_limits<ctype>::lowest() ||
+ search_value > std::numeric_limits<ctype>::max()) {
+        // Return -1 if value can't be represented in this ElementsKind.
return Just<int64_t>(-1);
}
- } else if (search_value < std::numeric_limits<ctype>::lowest() ||
- search_value > std::numeric_limits<ctype>::max()) {
- // Return false if value can't be represented in this ElementsKind.
- return Just<int64_t>(-1);
+ typed_search_value = static_cast<ctype>(search_value);
+ if (static_cast<double>(typed_search_value) != search_value) {
+ return Just<int64_t>(-1); // Loss of precision.
+ }
}
// Prototype has no elements, and not searching for the hole --- limit
@@ -3102,15 +3121,6 @@ class TypedElementsAccessor
length = elements->length();
}
- if (std::isnan(search_value)) {
- return Just<int64_t>(-1);
- }
-
- ctype typed_search_value = static_cast<ctype>(search_value);
- if (static_cast<double>(typed_search_value) != search_value) {
- return Just<int64_t>(-1); // Loss of precision.
- }
-
for (uint32_t k = start_from; k < length; ++k) {
ctype element_k = elements->get_scalar(k);
if (element_k == typed_search_value) return Just<int64_t>(k);
@@ -3125,28 +3135,34 @@ class TypedElementsAccessor
DisallowHeapAllocation no_gc;
DCHECK(!WasNeutered(*receiver));
- if (!value->IsNumber()) return Just<int64_t>(-1);
BackingStore* elements = BackingStore::cast(receiver->elements());
+ ctype typed_search_value;
- double search_value = value->Number();
-
- if (!std::isfinite(search_value)) {
- if (std::is_integral<ctype>::value) {
- // Integral types cannot represent +Inf or NaN.
- return Just<int64_t>(-1);
- } else if (std::isnan(search_value)) {
- // Strict Equality Comparison of NaN is always false.
+ if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) {
+ if (!value->IsBigInt()) return Just<int64_t>(-1);
+ bool lossless;
+ typed_search_value = BackingStore::FromHandle(value, &lossless);
+ if (!lossless) return Just<int64_t>(-1);
+ } else {
+ if (!value->IsNumber()) return Just<int64_t>(-1);
+ double search_value = value->Number();
+ if (!std::isfinite(search_value)) {
+ if (std::is_integral<ctype>::value) {
+ // Integral types cannot represent +Inf or NaN.
+ return Just<int64_t>(-1);
+ } else if (std::isnan(search_value)) {
+ // Strict Equality Comparison of NaN is always false.
+ return Just<int64_t>(-1);
+ }
+ } else if (search_value < std::numeric_limits<ctype>::lowest() ||
+ search_value > std::numeric_limits<ctype>::max()) {
+ // Return -1 if value can't be represented in this ElementsKind.
return Just<int64_t>(-1);
}
- } else if (search_value < std::numeric_limits<ctype>::lowest() ||
- search_value > std::numeric_limits<ctype>::max()) {
- // Return -1 if value can't be represented in this ElementsKind.
- return Just<int64_t>(-1);
- }
-
- ctype typed_search_value = static_cast<ctype>(search_value);
- if (static_cast<double>(typed_search_value) != search_value) {
- return Just<int64_t>(-1); // Loss of precision.
+ typed_search_value = static_cast<ctype>(search_value);
+ if (static_cast<double>(typed_search_value) != search_value) {
+ return Just<int64_t>(-1); // Loss of precision.
+ }
}
DCHECK_LT(start_from, elements->length());
@@ -3186,55 +3202,52 @@ class TypedElementsAccessor
return result;
}
- static Handle<JSObject> SliceWithResultImpl(Handle<JSObject> receiver,
- uint32_t start, uint32_t end,
- Handle<JSObject> result) {
- Isolate* isolate = receiver->GetIsolate();
- DCHECK(!WasNeutered(*receiver));
- DCHECK(result->IsJSTypedArray());
- DCHECK(!WasNeutered(*result));
+ static void CopyTypedArrayElementsSliceImpl(JSTypedArray* source,
+ JSTypedArray* destination,
+ size_t start, size_t end) {
+ DisallowHeapAllocation no_gc;
+ DCHECK_EQ(destination->GetElementsKind(), AccessorClass::kind());
+ DCHECK(!source->WasNeutered());
+ DCHECK(!destination->WasNeutered());
DCHECK_LE(start, end);
+ DCHECK_LE(end, source->length_value());
- Handle<JSTypedArray> array = Handle<JSTypedArray>::cast(receiver);
- Handle<JSTypedArray> result_array = Handle<JSTypedArray>::cast(result);
- DCHECK_LE(end, array->length_value());
+ size_t count = end - start;
+ DCHECK_LE(count, destination->length_value());
- // Fast path for the same type result array
- if (result_array->type() == array->type()) {
- int64_t element_size = array->element_size();
- int64_t count = end - start;
+ FixedTypedArrayBase* src_elements =
+ FixedTypedArrayBase::cast(source->elements());
+ BackingStore* dest_elements = BackingStore::cast(destination->elements());
- DisallowHeapAllocation no_gc;
- BackingStore* src_elements = BackingStore::cast(receiver->elements());
- BackingStore* result_elements =
- BackingStore::cast(result_array->elements());
-
- DCHECK_LE(count, result_elements->length());
- uint8_t* src =
- static_cast<uint8_t*>(src_elements->DataPtr()) + start * element_size;
- uint8_t* result = static_cast<uint8_t*>(result_elements->DataPtr());
- if (array->buffer() != result_array->buffer()) {
- std::memcpy(result, src, count * element_size);
- } else {
- // The spec defines the copy-step iteratively, which means that we
- // cannot use memcpy if the buffer is shared.
- uint8_t* end = src + count * element_size;
- while (src < end) {
- *result++ = *src++;
- }
+ size_t element_size = source->element_size();
+ uint8_t* source_data =
+ static_cast<uint8_t*>(src_elements->DataPtr()) + start * element_size;
+
+ // Fast path for the same type result array
+ if (source->type() == destination->type()) {
+ uint8_t* dest_data = static_cast<uint8_t*>(dest_elements->DataPtr());
+
+ // The spec defines the copy-step iteratively, which means that we
+ // cannot use memcpy if the buffer is shared.
+ uint8_t* end_ptr = source_data + count * element_size;
+ while (source_data < end_ptr) {
+ *dest_data++ = *source_data++;
}
- return result_array;
+ return;
}
- // If the types of the two typed arrays are different, properly convert
- // elements
- Handle<BackingStore> from(BackingStore::cast(array->elements()), isolate);
- ElementsAccessor* result_accessor = result_array->GetElementsAccessor();
- for (uint32_t i = start; i < end; i++) {
- Handle<Object> elem = AccessorClass::GetImpl(isolate, *from, i);
- result_accessor->Set(result_array, i - start, *elem);
+ switch (source->GetElementsKind()) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case TYPE##_ELEMENTS: \
+ CopyBetweenBackingStores<Type##ArrayTraits>(source_data, dest_elements, \
+ count, 0); \
+ break;
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ default:
+ UNREACHABLE();
+ break;
}
- return result_array;
}
static bool HasSimpleRepresentation(InstanceType type) {
@@ -3271,9 +3284,9 @@ class TypedElementsAccessor
BackingStore::cast(destination->elements());
DCHECK_LE(offset, destination->length_value());
- DCHECK_LE(source->length_value(), destination->length_value() - offset);
+ DCHECK_LE(length, destination->length_value() - offset);
DCHECK(source->length()->IsSmi());
- DCHECK_EQ(length, source->length_value());
+ DCHECK_LE(length, source->length_value());
InstanceType source_type = source_elements->map()->instance_type();
InstanceType destination_type =
@@ -3298,15 +3311,15 @@ class TypedElementsAccessor
std::memmove(dest_data + offset * element_size, source_data,
length * element_size);
} else {
- Isolate* isolate = source->GetIsolate();
- Zone zone(isolate->allocator(), ZONE_NAME);
+ std::unique_ptr<uint8_t[]> cloned_source_elements;
// If the typedarrays are overlapped, clone the source.
if (dest_data + dest_byte_length > source_data &&
source_data + source_byte_length > dest_data) {
- uint8_t* temp_data = zone.NewArray<uint8_t>(source_byte_length);
- std::memcpy(temp_data, source_data, source_byte_length);
- source_data = temp_data;
+ cloned_source_elements.reset(new uint8_t[source_byte_length]);
+ std::memcpy(cloned_source_elements.get(), source_data,
+ source_byte_length);
+ source_data = cloned_source_elements.get();
}
switch (source->GetElementsKind()) {
@@ -3339,7 +3352,8 @@ class TypedElementsAccessor
// them.
if (source_proto->IsNull(isolate)) return false;
if (source_proto->IsJSProxy()) return true;
- if (!context->is_initial_array_prototype(JSObject::cast(source_proto))) {
+ if (!context->native_context()->is_initial_array_prototype(
+ JSObject::cast(source_proto))) {
return true;
}
@@ -3349,6 +3363,7 @@ class TypedElementsAccessor
static bool TryCopyElementsFastNumber(Context* context, JSArray* source,
JSTypedArray* destination,
size_t length, uint32_t offset) {
+ if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) return false;
Isolate* isolate = source->GetIsolate();
DisallowHeapAllocation no_gc;
DisallowJavascriptExecution no_js(isolate);
@@ -3418,18 +3433,24 @@ class TypedElementsAccessor
return false;
}
- static Object* CopyElementsHandleSlow(Handle<JSReceiver> source,
+ static Object* CopyElementsHandleSlow(Handle<Object> source,
Handle<JSTypedArray> destination,
size_t length, uint32_t offset) {
- Isolate* isolate = source->GetIsolate();
+ Isolate* isolate = destination->GetIsolate();
Handle<BackingStore> destination_elements(
BackingStore::cast(destination->elements()));
for (uint32_t i = 0; i < length; i++) {
- LookupIterator it(isolate, source, i, source);
+ LookupIterator it(isolate, source, i);
Handle<Object> elem;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem,
Object::GetProperty(&it));
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem, Object::ToNumber(elem));
+ if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem,
+ BigInt::FromObject(isolate, elem));
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem,
+ Object::ToNumber(elem));
+ }
if (V8_UNLIKELY(destination->WasNeutered())) {
const char* op = "set";
@@ -3450,7 +3471,7 @@ class TypedElementsAccessor
// This doesn't guarantee that the destination array will be completely
// filled. The caller must do this by passing a source with equal length, if
// that is required.
- static Object* CopyElementsHandleImpl(Handle<JSReceiver> source,
+ static Object* CopyElementsHandleImpl(Handle<Object> source,
Handle<JSObject> destination,
size_t length, uint32_t offset) {
Isolate* isolate = destination->GetIsolate();
@@ -3463,8 +3484,30 @@ class TypedElementsAccessor
// All conversions from TypedArrays can be done without allocation.
if (source->IsJSTypedArray()) {
Handle<JSTypedArray> source_ta = Handle<JSTypedArray>::cast(source);
- CopyElementsFromTypedArray(*source_ta, *destination_ta, length, offset);
- return *isolate->factory()->undefined_value();
+ ElementsKind source_kind = source_ta->GetElementsKind();
+ bool source_is_bigint =
+ source_kind == BIGINT64_ELEMENTS || source_kind == BIGUINT64_ELEMENTS;
+ bool target_is_bigint =
+ Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS;
+ if (target_is_bigint) {
+ if (V8_UNLIKELY(!source_is_bigint)) {
+ Handle<Object> first =
+ JSReceiver::GetElement(isolate, source_ta, 0).ToHandleChecked();
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kBigIntFromObject, first));
+ }
+ } else {
+ if (V8_UNLIKELY(source_is_bigint)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kBigIntToNumber));
+ }
+ }
+ // If we have to copy more elements than we have in the source, we need to
+ // do special handling and conversion; that happens in the slow case.
+ if (length + offset <= source_ta->length_value()) {
+ CopyElementsFromTypedArray(*source_ta, *destination_ta, length, offset);
+ return *isolate->factory()->undefined_value();
+ }
}
// Fast cases for packed numbers kinds where we don't need to allocate.
@@ -4459,6 +4502,13 @@ void CopyTypedArrayElementsToTypedArray(JSTypedArray* source,
}
}
+void CopyTypedArrayElementsSlice(JSTypedArray* source,
+ JSTypedArray* destination, uintptr_t start,
+ uintptr_t end) {
+ destination->GetElementsAccessor()->CopyTypedArrayElementsSlice(
+ source, destination, start, end);
+}
+
void ElementsAccessor::InitializeOncePerProcess() {
static ElementsAccessor* accessor_array[] = {
#define ACCESSOR_ARRAY(Class, Kind, Store) new Class(#Kind),
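
A standalone sketch of the same-type copy loop used in CopyTypedArrayElementsSliceImpl above: the byte-wise forward copy is used unconditionally so that the spec's element-by-element copy order is preserved even when source and destination views alias the same ArrayBuffer (names mirror the diff; the wrapper function is illustrative):

#include <cstddef>
#include <cstdint>

void CopySliceBytes(const uint8_t* source_data, uint8_t* dest_data,
                    size_t count, size_t element_size) {
  const uint8_t* end_ptr = source_data + count * element_size;
  while (source_data < end_ptr) {
    *dest_data++ = *source_data++;  // one byte at a time, never memcpy
  }
}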
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index de5aa0d878..a2b8b49c93 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -12,6 +12,8 @@
namespace v8 {
namespace internal {
+class JSTypedArray;
+
// Abstract base class for handles that can operate on objects with differing
// ElementsKinds.
class ElementsAccessor {
@@ -141,9 +143,6 @@ class ElementsAccessor {
virtual Handle<JSObject> Slice(Handle<JSObject> receiver, uint32_t start,
uint32_t end) = 0;
- virtual Handle<JSObject> Slice(Handle<JSObject> receiver, uint32_t start,
- uint32_t end, Handle<JSObject> result) = 0;
-
virtual Handle<JSArray> Splice(Handle<JSArray> receiver,
uint32_t start, uint32_t delete_count,
Arguments* args, uint32_t add_count) = 0;
@@ -185,7 +184,7 @@ class ElementsAccessor {
ElementsKind source_kind,
Handle<FixedArrayBase> destination, int size) = 0;
- virtual Object* CopyElements(Handle<JSReceiver> source,
+ virtual Object* CopyElements(Handle<Object> source,
Handle<JSObject> destination, size_t length,
uint32_t offset = 0) = 0;
@@ -193,6 +192,10 @@ class ElementsAccessor {
Handle<JSObject> object,
uint32_t length) = 0;
+ virtual void CopyTypedArrayElementsSlice(JSTypedArray* source,
+ JSTypedArray* destination,
+ size_t start, size_t end) = 0;
+
protected:
friend class LookupIterator;
@@ -241,7 +244,6 @@ MUST_USE_RESULT MaybeHandle<Object> ArrayConstructInitializeElements(
Arguments* args);
// Called directly from CSA.
-class JSTypedArray;
void CopyFastNumberJSArrayElementsToTypedArray(Context* context,
JSArray* source,
JSTypedArray* destination,
@@ -250,6 +252,9 @@ void CopyFastNumberJSArrayElementsToTypedArray(Context* context,
void CopyTypedArrayElementsToTypedArray(JSTypedArray* source,
JSTypedArray* destination,
uintptr_t length, uintptr_t offset);
+void CopyTypedArrayElementsSlice(JSTypedArray* source,
+ JSTypedArray* destination, uintptr_t start,
+ uintptr_t end);
} // namespace internal
} // namespace v8
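
A hedged sketch of a call site for the new CopyTypedArrayElementsSlice entry point; the caller name and its preconditions are illustrative, the callee matches the declaration above (assumes src/elements.h is included):

// `result` is assumed to be a freshly allocated, non-neutered typed array
// with at least (end - start) elements of the destination's kind.
void SliceTypedArray(JSTypedArray* source, JSTypedArray* result,
                     uintptr_t start, uintptr_t end) {
  CopyTypedArrayElementsSlice(source, result, start, end);
}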
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index 7dd920a446..5030e261d6 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -5,11 +5,8 @@
#ifndef V8_EXECUTION_H_
#define V8_EXECUTION_H_
-#include "src/allocation.h"
#include "src/base/atomicops.h"
#include "src/globals.h"
-#include "src/objects/code.h"
-#include "src/utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/external-reference-table.cc b/deps/v8/src/external-reference-table.cc
index 52157b5034..c20592dc81 100644
--- a/deps/v8/src/external-reference-table.cc
+++ b/deps/v8/src/external-reference-table.cc
@@ -281,6 +281,8 @@ void ExternalReferenceTable::AddReferences(Isolate* isolate) {
Add(ExternalReference::copy_typed_array_elements_to_typed_array(isolate)
.address(),
"copy_typed_array_elements_to_typed_array");
+ Add(ExternalReference::copy_typed_array_elements_slice(isolate).address(),
+ "copy_typed_array_elements_slice");
Add(ExternalReference::log_enter_external_function(isolate).address(),
"Logger::EnterExternal");
Add(ExternalReference::log_leave_external_function(isolate).address(),
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index fab539bf8b..6fd8e8c61e 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -17,7 +17,9 @@
#include "src/objects/bigint.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/frame-array-inl.h"
+#include "src/objects/microtask-inl.h"
#include "src/objects/module.h"
+#include "src/objects/promise-inl.h"
#include "src/objects/scope-info.h"
#include "src/unicode-cache.h"
#include "src/unicode-decoder.h"
@@ -149,13 +151,11 @@ Handle<ConstantElementsPair> Factory::NewConstantElementsPair(
}
Handle<TemplateObjectDescription> Factory::NewTemplateObjectDescription(
- int hash, Handle<FixedArray> raw_strings,
- Handle<FixedArray> cooked_strings) {
+ Handle<FixedArray> raw_strings, Handle<FixedArray> cooked_strings) {
DCHECK_EQ(raw_strings->length(), cooked_strings->length());
DCHECK_LT(0, raw_strings->length());
Handle<TemplateObjectDescription> result =
- Handle<TemplateObjectDescription>::cast(NewStruct(TUPLE3_TYPE, TENURED));
- result->set_hash(hash);
+ Handle<TemplateObjectDescription>::cast(NewStruct(TUPLE2_TYPE, TENURED));
result->set_raw_strings(*raw_strings);
result->set_cooked_strings(*cooked_strings);
return result;
@@ -222,7 +222,8 @@ Handle<FixedArray> Factory::NewFixedArrayWithHoles(int length,
FixedArray);
}
-Handle<FixedArray> Factory::NewUninitializedFixedArray(int length) {
+Handle<FixedArray> Factory::NewUninitializedFixedArray(
+ int length, PretenureFlag pretenure) {
DCHECK_LE(0, length);
if (length == 0) return empty_fixed_array();
@@ -230,7 +231,7 @@ Handle<FixedArray> Factory::NewUninitializedFixedArray(int length) {
// array. After getting canary/performance coverage, either remove the
   // function or revert to returning an uninitialized array.
CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateFixedArray(length, NOT_TENURED),
+ isolate()->heap()->AllocateFixedArray(length, pretenure),
FixedArray);
}
@@ -391,9 +392,9 @@ MaybeHandle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string,
MaybeHandle<String> Factory::NewStringFromUtf8(Vector<const char> string,
PretenureFlag pretenure) {
// Check for ASCII first since this is the common case.
- const char* start = string.start();
+ const char* ascii_data = string.start();
int length = string.length();
- int non_ascii_start = String::NonAsciiStart(start, length);
+ int non_ascii_start = String::NonAsciiStart(ascii_data, length);
if (non_ascii_start >= length) {
// If the string is ASCII, we do not need to convert the characters
// since UTF8 is backwards compatible with ASCII.
@@ -401,35 +402,38 @@ MaybeHandle<String> Factory::NewStringFromUtf8(Vector<const char> string,
}
// Non-ASCII and we need to decode.
+ auto non_ascii = string.SubVector(non_ascii_start, length);
Access<UnicodeCache::Utf8Decoder>
decoder(isolate()->unicode_cache()->utf8_decoder());
- decoder->Reset(string.start() + non_ascii_start,
- length - non_ascii_start);
+ decoder->Reset(non_ascii);
+
int utf16_length = static_cast<int>(decoder->Utf16Length());
DCHECK_GT(utf16_length, 0);
+
// Allocate string.
Handle<SeqTwoByteString> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), result,
NewRawTwoByteString(non_ascii_start + utf16_length, pretenure),
String);
+
// Copy ASCII portion.
uint16_t* data = result->GetChars();
- const char* ascii_data = string.start();
for (int i = 0; i < non_ascii_start; i++) {
*data++ = *ascii_data++;
}
+
// Now write the remainder.
- decoder->WriteUtf16(data, utf16_length);
+ decoder->WriteUtf16(data, utf16_length, non_ascii);
return result;
}
MaybeHandle<String> Factory::NewStringFromUtf8SubString(
Handle<SeqOneByteString> str, int begin, int length,
PretenureFlag pretenure) {
- // Check for ASCII first since this is the common case.
- const char* start = reinterpret_cast<const char*>(str->GetChars() + begin);
- int non_ascii_start = String::NonAsciiStart(start, length);
+ const char* ascii_data =
+ reinterpret_cast<const char*>(str->GetChars() + begin);
+ int non_ascii_start = String::NonAsciiStart(ascii_data, length);
if (non_ascii_start >= length) {
// If the string is ASCII, we can just make a substring.
// TODO(v8): the pretenure flag is ignored in this case.
@@ -437,28 +441,35 @@ MaybeHandle<String> Factory::NewStringFromUtf8SubString(
}
// Non-ASCII and we need to decode.
+ auto non_ascii = Vector<const char>(ascii_data + non_ascii_start,
+ length - non_ascii_start);
Access<UnicodeCache::Utf8Decoder> decoder(
isolate()->unicode_cache()->utf8_decoder());
- decoder->Reset(start + non_ascii_start, length - non_ascii_start);
+ decoder->Reset(non_ascii);
+
int utf16_length = static_cast<int>(decoder->Utf16Length());
DCHECK_GT(utf16_length, 0);
+
// Allocate string.
Handle<SeqTwoByteString> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), result,
NewRawTwoByteString(non_ascii_start + utf16_length, pretenure), String);
- // Reset the decoder, because the original {str} may have moved.
- const char* ascii_data =
- reinterpret_cast<const char*>(str->GetChars() + begin);
- decoder->Reset(ascii_data + non_ascii_start, length - non_ascii_start);
+ // Update pointer references, since the original string may have moved after
+ // allocation.
+ ascii_data = reinterpret_cast<const char*>(str->GetChars() + begin);
+ non_ascii = Vector<const char>(ascii_data + non_ascii_start,
+ length - non_ascii_start);
+
// Copy ASCII portion.
uint16_t* data = result->GetChars();
for (int i = 0; i < non_ascii_start; i++) {
*data++ = *ascii_data++;
}
+
// Now write the remainder.
- decoder->WriteUtf16(data, utf16_length);
+ decoder->WriteUtf16(data, utf16_length, non_ascii);
return result;
}
@@ -985,6 +996,12 @@ Handle<Symbol> Factory::NewPrivateSymbol() {
return symbol;
}
+Handle<Symbol> Factory::NewPrivateFieldSymbol() {
+ Handle<Symbol> symbol = NewSymbol();
+ symbol->set_is_private_field();
+ return symbol;
+}
+
Handle<Context> Factory::NewNativeContext() {
Handle<FixedArray> array =
NewFixedArray(Context::NATIVE_CONTEXT_SLOTS, TENURED);
@@ -1186,6 +1203,38 @@ Handle<Script> Factory::NewScript(Handle<String> source) {
return script;
}
+Handle<CallableTask> Factory::NewCallableTask(Handle<JSReceiver> callable,
+ Handle<Context> context) {
+ DCHECK(callable->IsCallable());
+ Handle<CallableTask> microtask =
+ Handle<CallableTask>::cast(NewStruct(CALLABLE_TASK_TYPE));
+ microtask->set_callable(*callable);
+ microtask->set_context(*context);
+ return microtask;
+}
+
+Handle<CallbackTask> Factory::NewCallbackTask(Handle<Foreign> callback,
+ Handle<Foreign> data) {
+ Handle<CallbackTask> microtask =
+ Handle<CallbackTask>::cast(NewStruct(CALLBACK_TASK_TYPE));
+ microtask->set_callback(*callback);
+ microtask->set_data(*data);
+ return microtask;
+}
+
+Handle<PromiseResolveThenableJobTask> Factory::NewPromiseResolveThenableJobTask(
+ Handle<JSPromise> promise_to_resolve, Handle<JSReceiver> then,
+ Handle<JSReceiver> thenable, Handle<Context> context) {
+ DCHECK(then->IsCallable());
+ Handle<PromiseResolveThenableJobTask> microtask =
+ Handle<PromiseResolveThenableJobTask>::cast(
+ NewStruct(PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE));
+ microtask->set_promise_to_resolve(*promise_to_resolve);
+ microtask->set_then(*then);
+ microtask->set_thenable(*thenable);
+ microtask->set_context(*context);
+ return microtask;
+}
Handle<Foreign> Factory::NewForeign(Address addr, PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(isolate(),
@@ -1194,11 +1243,6 @@ Handle<Foreign> Factory::NewForeign(Address addr, PretenureFlag pretenure) {
}
-Handle<Foreign> Factory::NewForeign(const AccessorDescriptor* desc) {
- return NewForeign((Address) desc, TENURED);
-}
-
-
Handle<ByteArray> Factory::NewByteArray(int length, PretenureFlag pretenure) {
DCHECK_LE(0, length);
CALL_HEAP_FUNCTION(
@@ -1247,22 +1291,28 @@ Handle<Cell> Factory::NewCell(Handle<Object> value) {
Cell);
}
-Handle<Cell> Factory::NewNoClosuresCell(Handle<Object> value) {
- Handle<Cell> cell = NewCell(value);
- cell->set_map_no_write_barrier(*no_closures_cell_map());
- return cell;
+Handle<FeedbackCell> Factory::NewNoClosuresCell(Handle<HeapObject> value) {
+ AllowDeferredHandleDereference convert_to_cell;
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateFeedbackCell(
+ isolate()->heap()->no_closures_cell_map(), *value),
+ FeedbackCell);
}
-Handle<Cell> Factory::NewOneClosureCell(Handle<Object> value) {
- Handle<Cell> cell = NewCell(value);
- cell->set_map_no_write_barrier(*one_closure_cell_map());
- return cell;
+Handle<FeedbackCell> Factory::NewOneClosureCell(Handle<HeapObject> value) {
+ AllowDeferredHandleDereference convert_to_cell;
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateFeedbackCell(
+ isolate()->heap()->one_closure_cell_map(), *value),
+ FeedbackCell);
}
-Handle<Cell> Factory::NewManyClosuresCell(Handle<Object> value) {
- Handle<Cell> cell = NewCell(value);
- cell->set_map_no_write_barrier(*many_closures_cell_map());
- return cell;
+Handle<FeedbackCell> Factory::NewManyClosuresCell(Handle<HeapObject> value) {
+ AllowDeferredHandleDereference convert_to_cell;
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateFeedbackCell(
+ isolate()->heap()->many_closures_cell_map(), *value),
+ FeedbackCell);
}
Handle<PropertyCell> Factory::NewPropertyCell(Handle<Name> name) {
@@ -1423,8 +1473,10 @@ Handle<HeapNumber> Factory::NewHeapNumber(MutableMode mode,
HeapNumber);
}
-Handle<FreshlyAllocatedBigInt> Factory::NewBigInt(int length) {
- CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->AllocateBigInt(length),
+Handle<FreshlyAllocatedBigInt> Factory::NewBigInt(int length,
+ PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateBigInt(length, pretenure),
FreshlyAllocatedBigInt);
}
@@ -1519,7 +1571,7 @@ Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
function->set_shared(*info);
function->set_code(info->code());
function->set_context(*context_or_undefined);
- function->set_feedback_vector_cell(*undefined_cell());
+ function->set_feedback_cell(*many_closures_cell());
int header_size;
if (map->has_prototype_slot()) {
header_size = JSFunction::kSizeWithPrototype;
@@ -1664,11 +1716,11 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> info, Handle<Context> context,
- Handle<Cell> vector, PretenureFlag pretenure) {
+ Handle<FeedbackCell> feedback_cell, PretenureFlag pretenure) {
Handle<Map> initial_map(
Map::cast(context->native_context()->get(info->function_map_index())));
- return NewFunctionFromSharedFunctionInfo(initial_map, info, context, vector,
- pretenure);
+ return NewFunctionFromSharedFunctionInfo(initial_map, info, context,
+ feedback_cell, pretenure);
}
Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
@@ -1688,29 +1740,29 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<Map> initial_map, Handle<SharedFunctionInfo> info,
- Handle<Object> context_or_undefined, Handle<Cell> vector,
+ Handle<Object> context_or_undefined, Handle<FeedbackCell> feedback_cell,
PretenureFlag pretenure) {
DCHECK_EQ(JS_FUNCTION_TYPE, initial_map->instance_type());
Handle<JSFunction> result =
NewFunction(initial_map, info, context_or_undefined, pretenure);
- // Bump the closure count that is encoded in the vector cell's map.
- if (vector->map() == *no_closures_cell_map()) {
- vector->set_map(*one_closure_cell_map());
- } else if (vector->map() == *one_closure_cell_map()) {
- vector->set_map(*many_closures_cell_map());
+ // Bump the closure count that is encoded in the feedback cell's map.
+ if (feedback_cell->map() == *no_closures_cell_map()) {
+ feedback_cell->set_map(*one_closure_cell_map());
+ } else if (feedback_cell->map() == *one_closure_cell_map()) {
+ feedback_cell->set_map(*many_closures_cell_map());
} else {
- DCHECK_EQ(vector->map(), *many_closures_cell_map());
+ DCHECK_EQ(feedback_cell->map(), *many_closures_cell_map());
}
- // Check that the optimized code in the feedback vector wasn't marked for
+ // Check that the optimized code in the feedback cell wasn't marked for
// deoptimization while not pointed to by any live JSFunction.
- if (vector->value()->IsFeedbackVector()) {
- FeedbackVector::cast(vector->value())
+ if (feedback_cell->value()->IsFeedbackVector()) {
+ FeedbackVector::cast(feedback_cell->value())
->EvictOptimizedCodeMarkedForDeoptimization(
*info, "new function from shared function info");
}
- result->set_feedback_vector_cell(*vector);
+ result->set_feedback_cell(*feedback_cell);
if (context_or_undefined->IsContext()) {
// Give compiler a chance to pre-initialize.
@@ -1759,17 +1811,13 @@ Handle<CodeDataContainer> Factory::NewCodeDataContainer(int flags) {
Handle<Code> Factory::NewCode(
const CodeDesc& desc, Code::Kind kind, Handle<Object> self_ref,
- int32_t builtin_index, MaybeHandle<HandlerTable> maybe_handler_table,
- MaybeHandle<ByteArray> maybe_source_position_table,
+ int32_t builtin_index, MaybeHandle<ByteArray> maybe_source_position_table,
MaybeHandle<DeoptimizationData> maybe_deopt_data, Movability movability,
uint32_t stub_key, bool is_turbofanned, int stack_slots,
- int safepoint_table_offset) {
+ int safepoint_table_offset, int handler_table_offset) {
Handle<ByteArray> reloc_info = NewByteArray(desc.reloc_size, TENURED);
Handle<CodeDataContainer> data_container = NewCodeDataContainer(0);
- Handle<HandlerTable> handler_table =
- maybe_handler_table.is_null() ? HandlerTable::Empty(isolate())
- : maybe_handler_table.ToHandleChecked();
Handle<ByteArray> source_position_table =
maybe_source_position_table.is_null()
? empty_byte_array()
@@ -1778,13 +1826,13 @@ Handle<Code> Factory::NewCode(
maybe_deopt_data.is_null() ? DeoptimizationData::Empty(isolate())
: maybe_deopt_data.ToHandleChecked();
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateCode(
- desc, kind, self_ref, builtin_index, *reloc_info, *data_container,
- *handler_table, *source_position_table, *deopt_data, movability,
- stub_key, is_turbofanned, stack_slots, safepoint_table_offset),
- Code);
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateCode(
+ desc, kind, self_ref, builtin_index, *reloc_info,
+ *data_container, *source_position_table, *deopt_data,
+ movability, stub_key, is_turbofanned, stack_slots,
+ safepoint_table_offset, handler_table_offset),
+ Code);
}
Handle<Code> Factory::NewCodeForDeserialization(uint32_t size) {
@@ -2079,12 +2127,13 @@ Handle<JSIteratorResult> Factory::NewJSIteratorResult(Handle<Object> value,
}
Handle<JSAsyncFromSyncIterator> Factory::NewJSAsyncFromSyncIterator(
- Handle<JSReceiver> sync_iterator) {
+ Handle<JSReceiver> sync_iterator, Handle<Object> next) {
Handle<Map> map(isolate()->native_context()->async_from_sync_iterator_map());
Handle<JSAsyncFromSyncIterator> iterator =
Handle<JSAsyncFromSyncIterator>::cast(NewJSObjectFromMap(map));
iterator->set_sync_iterator(*sync_iterator);
+ iterator->set_next(*next);
return iterator;
}
@@ -2453,14 +2502,10 @@ void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
MaybeHandle<String> name, FunctionKind kind, Handle<Code> code,
Handle<ScopeInfo> scope_info) {
- DCHECK(IsValidFunctionKind(kind));
Handle<SharedFunctionInfo> shared =
NewSharedFunctionInfo(name, code, IsConstructable(kind), kind);
shared->set_scope_info(*scope_info);
shared->set_outer_scope_info(*the_hole_value());
- if (IsGeneratorFunction(kind)) {
- shared->set_instance_class_name(isolate()->heap()->Generator_string());
- }
return shared;
}
@@ -2530,13 +2575,10 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
is_constructor ? isolate()->builtins()->JSConstructStubGeneric()
: BUILTIN_CODE(isolate(), ConstructedNonConstructable);
share->SetConstructStub(*construct_stub);
- share->set_instance_class_name(*Object_string());
share->set_script(*undefined_value(), SKIP_WRITE_BARRIER);
share->set_debug_info(Smi::kZero, SKIP_WRITE_BARRIER);
share->set_function_identifier(*undefined_value(), SKIP_WRITE_BARRIER);
- StaticFeedbackVectorSpec empty_spec;
- Handle<FeedbackMetadata> feedback_metadata =
- FeedbackMetadata::New(isolate(), &empty_spec);
+ Handle<FeedbackMetadata> feedback_metadata = FeedbackMetadata::New(isolate());
share->set_feedback_metadata(*feedback_metadata, SKIP_WRITE_BARRIER);
share->set_function_literal_id(FunctionLiteral::kIdTypeInvalid);
#if V8_SFI_HAS_UNIQUE_ID
@@ -2673,7 +2715,7 @@ Handle<BreakPointInfo> Factory::NewBreakPointInfo(int source_position) {
Handle<BreakPointInfo> new_break_point_info =
Handle<BreakPointInfo>::cast(NewStruct(TUPLE2_TYPE, TENURED));
new_break_point_info->set_source_position(source_position);
- new_break_point_info->set_break_point_objects(*undefined_value());
+ new_break_point_info->set_break_points(*undefined_value());
return new_break_point_info;
}
@@ -2701,7 +2743,7 @@ Handle<StackFrameInfo> Factory::NewStackFrameInfo() {
Handle<SourcePositionTableWithFrameCache>
Factory::NewSourcePositionTableWithFrameCache(
Handle<ByteArray> source_position_table,
- Handle<NumberDictionary> stack_frame_cache) {
+ Handle<SimpleNumberDictionary> stack_frame_cache) {
Handle<SourcePositionTableWithFrameCache>
source_position_table_with_frame_cache =
Handle<SourcePositionTableWithFrameCache>::cast(
@@ -3084,6 +3126,19 @@ Handle<Map> Factory::CreateClassFunctionMap(Handle<JSFunction> empty_function) {
return map;
}
+Handle<JSPromise> Factory::NewJSPromiseWithoutHook(PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateJSPromise(
+ *isolate()->promise_function(), pretenure),
+ JSPromise);
+}
+
+Handle<JSPromise> Factory::NewJSPromise(PretenureFlag pretenure) {
+ Handle<JSPromise> promise = NewJSPromiseWithoutHook(pretenure);
+ isolate()->RunPromiseHook(PromiseHookType::kInit, promise, undefined_value());
+ return promise;
+}
+
// static
NewFunctionArgs NewFunctionArgs::ForWasm(Handle<String> name, Handle<Code> code,
Handle<Map> map) {
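
A hedged sketch of how the new microtask and promise factory methods might be used together when a resolution with a thenable schedules a job; the wrapper function and variable names are illustrative, the factory calls follow the definitions above (assumes src/factory.h and src/isolate.h are included):

// Construction only; enqueueing the task on the microtask queue is elided.
Handle<PromiseResolveThenableJobTask> MakeThenableJob(
    Isolate* isolate, Handle<JSPromise> promise_to_resolve,
    Handle<JSReceiver> thenable, Handle<JSReceiver> then,
    Handle<Context> context) {
  // NewJSPromise() (see above) would be the counterpart for creating the
  // promise itself; it also runs the kInit promise hook.
  return isolate->factory()->NewPromiseResolveThenableJobTask(
      promise_to_resolve, then, thenable, context);
}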
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index f0e9d63885..966b0602fe 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -27,6 +27,8 @@ class AliasedArgumentsEntry;
class BreakPointInfo;
class BreakPoint;
class BoilerplateDescription;
+class CallableTask;
+class CallbackTask;
class ConstantElementsPair;
class CoverageInfo;
class DebugInfo;
@@ -40,6 +42,7 @@ class JSWeakMap;
class NewFunctionArgs;
struct SourceRange;
class PreParsedScopeData;
+class PromiseResolveThenableJobTask;
class TemplateObjectDescription;
enum FunctionMode {
@@ -101,7 +104,8 @@ class V8_EXPORT_PRIVATE Factory final {
int length, PretenureFlag pretenure = NOT_TENURED);
// Allocates an uninitialized fixed array. It must be filled by the caller.
- Handle<FixedArray> NewUninitializedFixedArray(int length);
+ Handle<FixedArray> NewUninitializedFixedArray(
+ int length, PretenureFlag pretenure = NOT_TENURED);
// Allocates a feedback vector whose slots are initialized with undefined
// values.
@@ -165,8 +169,7 @@ class V8_EXPORT_PRIVATE Factory final {
// Create a new TemplateObjectDescription struct.
Handle<TemplateObjectDescription> NewTemplateObjectDescription(
- int hash, Handle<FixedArray> raw_strings,
- Handle<FixedArray> cooked_strings);
+ Handle<FixedArray> raw_strings, Handle<FixedArray> cooked_strings);
// Create a pre-tenured empty AccessorPair.
Handle<AccessorPair> NewAccessorPair();
@@ -326,6 +329,7 @@ class V8_EXPORT_PRIVATE Factory final {
// Create a symbol.
Handle<Symbol> NewSymbol();
Handle<Symbol> NewPrivateSymbol();
+ Handle<Symbol> NewPrivateFieldSymbol();
// Create a global (but otherwise uninitialized) context.
Handle<Context> NewNativeContext();
@@ -386,16 +390,21 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<SourcePositionTableWithFrameCache>
NewSourcePositionTableWithFrameCache(
Handle<ByteArray> source_position_table,
- Handle<NumberDictionary> stack_frame_cache);
+ Handle<SimpleNumberDictionary> stack_frame_cache);
+
+ // Allocate various microtasks.
+ Handle<CallableTask> NewCallableTask(Handle<JSReceiver> callable,
+ Handle<Context> context);
+ Handle<CallbackTask> NewCallbackTask(Handle<Foreign> callback,
+ Handle<Foreign> data);
+ Handle<PromiseResolveThenableJobTask> NewPromiseResolveThenableJobTask(
+ Handle<JSPromise> promise_to_resolve, Handle<JSReceiver> then,
+ Handle<JSReceiver> thenable, Handle<Context> context);
// Foreign objects are pretenured when allocated by the bootstrapper.
Handle<Foreign> NewForeign(Address addr,
PretenureFlag pretenure = NOT_TENURED);
- // Allocate a new foreign object. The foreign is pretenured (allocated
- // directly in the old generation).
- Handle<Foreign> NewForeign(const AccessorDescriptor* foreign);
-
Handle<ByteArray> NewByteArray(int length,
PretenureFlag pretenure = NOT_TENURED);
@@ -417,9 +426,9 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<WeakCell> NewWeakCell(Handle<HeapObject> value);
- Handle<Cell> NewNoClosuresCell(Handle<Object> value);
- Handle<Cell> NewOneClosureCell(Handle<Object> value);
- Handle<Cell> NewManyClosuresCell(Handle<Object> value);
+ Handle<FeedbackCell> NewNoClosuresCell(Handle<HeapObject> value);
+ Handle<FeedbackCell> NewOneClosureCell(Handle<HeapObject> value);
+ Handle<FeedbackCell> NewManyClosuresCell(Handle<HeapObject> value);
Handle<TransitionArray> NewTransitionArray(int capacity);
@@ -495,7 +504,8 @@ class V8_EXPORT_PRIVATE Factory final {
// Allocates a new BigInt with {length} digits. Only to be used by
// MutableBigInt::New*.
- Handle<FreshlyAllocatedBigInt> NewBigInt(int length);
+ Handle<FreshlyAllocatedBigInt> NewBigInt(
+ int length, PretenureFlag pretenure = NOT_TENURED);
Handle<JSObject> NewArgumentsObject(Handle<JSFunction> callee, int length);
@@ -594,7 +604,7 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<JSIteratorResult> NewJSIteratorResult(Handle<Object> value, bool done);
Handle<JSAsyncFromSyncIterator> NewJSAsyncFromSyncIterator(
- Handle<JSReceiver> sync_iterator);
+ Handle<JSReceiver> sync_iterator, Handle<Object> next);
Handle<JSMap> NewJSMap();
Handle<JSSet> NewJSSet();
@@ -635,12 +645,12 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
Handle<Map> initial_map, Handle<SharedFunctionInfo> function_info,
- Handle<Object> context_or_undefined, Handle<Cell> vector,
+ Handle<Object> context_or_undefined, Handle<FeedbackCell> feedback_cell,
PretenureFlag pretenure = TENURED);
Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> function_info, Handle<Context> context,
- Handle<Cell> vector, PretenureFlag pretenure = TENURED);
+ Handle<FeedbackCell> feedback_cell, PretenureFlag pretenure = TENURED);
Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
Handle<Map> initial_map, Handle<SharedFunctionInfo> function_info,
@@ -677,15 +687,14 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<Code> NewCode(const CodeDesc& desc, Code::Kind kind,
Handle<Object> self_reference,
int32_t builtin_index = Builtins::kNoBuiltinId,
- MaybeHandle<HandlerTable> maybe_handler_table =
- MaybeHandle<HandlerTable>(),
MaybeHandle<ByteArray> maybe_source_position_table =
MaybeHandle<ByteArray>(),
MaybeHandle<DeoptimizationData> maybe_deopt_data =
MaybeHandle<DeoptimizationData>(),
Movability movability = kMovable, uint32_t stub_key = 0,
bool is_turbofanned = false, int stack_slots = 0,
- int safepoint_table_offset = 0);
+ int safepoint_table_offset = 0,
+ int handler_table_offset = 0);
// Allocates a new, empty code object for use by builtin deserialization. The
// given {size} argument specifies the size of the entire code object.
@@ -848,6 +857,8 @@ class V8_EXPORT_PRIVATE Factory final {
   // Converts the given ToPrimitive hint to its string representation.
Handle<String> ToPrimitiveHintString(ToPrimitiveHint hint);
+ Handle<JSPromise> NewJSPromise(PretenureFlag pretenure = NOT_TENURED);
+
private:
Isolate* isolate() { return reinterpret_cast<Isolate*>(this); }
@@ -875,6 +886,9 @@ class V8_EXPORT_PRIVATE Factory final {
// Create a JSArray with no elements and no length.
Handle<JSArray> NewJSArray(ElementsKind elements_kind,
PretenureFlag pretenure = NOT_TENURED);
+
+ Handle<JSPromise> NewJSPromiseWithoutHook(
+ PretenureFlag pretenure = NOT_TENURED);
};
// Utility class to simplify argument handling around JSFunction creation.
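
A hedged sketch of instantiating a closure through the FeedbackCell-based overload declared above; the wrapper is illustrative, and the closure-count bump in the cell's map happens inside the factory, as shown in factory.cc:

// Assumes src/factory.h is included.
Handle<JSFunction> InstantiateClosure(Isolate* isolate,
                                      Handle<SharedFunctionInfo> shared,
                                      Handle<Context> context,
                                      Handle<FeedbackCell> feedback_cell) {
  // The FeedbackCell parameter replaces the old Handle<Cell> holding the
  // feedback vector.
  return isolate->factory()->NewFunctionFromSharedFunctionInfo(
      shared, context, feedback_cell, NOT_TENURED);
}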
diff --git a/deps/v8/src/fast-dtoa.cc b/deps/v8/src/fast-dtoa.cc
index 9572c7026d..8c061dae7f 100644
--- a/deps/v8/src/fast-dtoa.cc
+++ b/deps/v8/src/fast-dtoa.cc
@@ -229,7 +229,8 @@ static void BiggestPowerTen(uint32_t number,
*power = kTen9;
*exponent = 9;
break;
- } // else fallthrough
+ }
+ V8_FALLTHROUGH;
case 29:
case 28:
case 27:
@@ -237,7 +238,8 @@ static void BiggestPowerTen(uint32_t number,
*power = kTen8;
*exponent = 8;
break;
- } // else fallthrough
+ }
+ V8_FALLTHROUGH;
case 26:
case 25:
case 24:
@@ -245,7 +247,8 @@ static void BiggestPowerTen(uint32_t number,
*power = kTen7;
*exponent = 7;
break;
- } // else fallthrough
+ }
+ V8_FALLTHROUGH;
case 23:
case 22:
case 21:
@@ -254,7 +257,8 @@ static void BiggestPowerTen(uint32_t number,
*power = kTen6;
*exponent = 6;
break;
- } // else fallthrough
+ }
+ V8_FALLTHROUGH;
case 19:
case 18:
case 17:
@@ -262,7 +266,8 @@ static void BiggestPowerTen(uint32_t number,
*power = kTen5;
*exponent = 5;
break;
- } // else fallthrough
+ }
+ V8_FALLTHROUGH;
case 16:
case 15:
case 14:
@@ -270,7 +275,8 @@ static void BiggestPowerTen(uint32_t number,
*power = kTen4;
*exponent = 4;
break;
- } // else fallthrough
+ }
+ V8_FALLTHROUGH;
case 13:
case 12:
case 11:
@@ -279,7 +285,8 @@ static void BiggestPowerTen(uint32_t number,
*power = 1000;
*exponent = 3;
break;
- } // else fallthrough
+ }
+ V8_FALLTHROUGH;
case 9:
case 8:
case 7:
@@ -287,7 +294,8 @@ static void BiggestPowerTen(uint32_t number,
*power = 100;
*exponent = 2;
break;
- } // else fallthrough
+ }
+ V8_FALLTHROUGH;
case 6:
case 5:
case 4:
@@ -295,7 +303,8 @@ static void BiggestPowerTen(uint32_t number,
*power = 10;
*exponent = 1;
break;
- } // else fallthrough
+ }
+ V8_FALLTHROUGH;
case 3:
case 2:
case 1:
@@ -303,7 +312,8 @@ static void BiggestPowerTen(uint32_t number,
*power = 1;
*exponent = 0;
break;
- } // else fallthrough
+ }
+ V8_FALLTHROUGH;
case 0:
*power = 0;
*exponent = -1;
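
The fast-dtoa change replaces the "// else fallthrough" comments with an explicit V8_FALLTHROUGH annotation, which documents intent and silences implicit-fallthrough warnings (the macro is expected to expand to [[fallthrough]] or a compiler-specific equivalent). A toy sketch of the pattern, unrelated to the actual power-of-ten table:

int DecimalDigitsForBitWidth(int number_of_bits, unsigned number) {
  switch (number_of_bits) {
    case 10:
      if (number >= 1000) return 4;
      V8_FALLTHROUGH;  // deliberately continue with the next group of cases
    case 9:
    case 8:
    case 7:
      if (number >= 100) return 3;
      V8_FALLTHROUGH;
    default:
      return number >= 10 ? 2 : 1;
  }
}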
diff --git a/deps/v8/src/feedback-vector.cc b/deps/v8/src/feedback-vector.cc
index c3bdd82616..f5240baf1b 100644
--- a/deps/v8/src/feedback-vector.cc
+++ b/deps/v8/src/feedback-vector.cc
@@ -12,35 +12,23 @@
namespace v8 {
namespace internal {
-template <typename Derived>
-FeedbackSlot FeedbackVectorSpecBase<Derived>::AddSlot(FeedbackSlotKind kind) {
- int slot = This()->slots();
+FeedbackSlot FeedbackVectorSpec::AddSlot(FeedbackSlotKind kind) {
+ int slot = slots();
int entries_per_slot = FeedbackMetadata::GetSlotSize(kind);
- This()->append(kind);
+ append(kind);
for (int i = 1; i < entries_per_slot; i++) {
- This()->append(FeedbackSlotKind::kInvalid);
+ append(FeedbackSlotKind::kInvalid);
}
return FeedbackSlot(slot);
}
-template FeedbackSlot FeedbackVectorSpecBase<FeedbackVectorSpec>::AddSlot(
- FeedbackSlotKind kind);
-template FeedbackSlot FeedbackVectorSpecBase<StaticFeedbackVectorSpec>::AddSlot(
- FeedbackSlotKind kind);
-
-template <typename Derived>
-FeedbackSlot FeedbackVectorSpecBase<Derived>::AddTypeProfileSlot() {
+FeedbackSlot FeedbackVectorSpec::AddTypeProfileSlot() {
FeedbackSlot slot = AddSlot(FeedbackSlotKind::kTypeProfile);
CHECK_EQ(FeedbackVectorSpec::kTypeProfileSlotIndex,
FeedbackVector::GetIndex(slot));
return slot;
}
-template FeedbackSlot
-FeedbackVectorSpecBase<FeedbackVectorSpec>::AddTypeProfileSlot();
-template FeedbackSlot
-FeedbackVectorSpecBase<StaticFeedbackVectorSpec>::AddTypeProfileSlot();
-
bool FeedbackVectorSpec::HasTypeProfileSlot() const {
FeedbackSlot slot =
FeedbackVector::ToSlot(FeedbackVectorSpec::kTypeProfileSlotIndex);
@@ -77,18 +65,12 @@ void FeedbackMetadata::SetKind(FeedbackSlot slot, FeedbackSlotKind kind) {
set(index, Smi::FromInt(new_data));
}
-template Handle<FeedbackMetadata> FeedbackMetadata::New(
- Isolate* isolate, const StaticFeedbackVectorSpec* spec);
-template Handle<FeedbackMetadata> FeedbackMetadata::New(
- Isolate* isolate, const FeedbackVectorSpec* spec);
-
// static
-template <typename Spec>
Handle<FeedbackMetadata> FeedbackMetadata::New(Isolate* isolate,
- const Spec* spec) {
+ const FeedbackVectorSpec* spec) {
Factory* factory = isolate->factory();
- const int slot_count = spec->slots();
+ const int slot_count = spec == nullptr ? 0 : spec->slots();
const int slot_kinds_length = VectorICComputer::word_count(slot_count);
const int length = slot_kinds_length + kReservedIndexCount;
if (length == kReservedIndexCount) {
@@ -96,6 +78,7 @@ Handle<FeedbackMetadata> FeedbackMetadata::New(Isolate* isolate,
}
#ifdef DEBUG
for (int i = 0; i < slot_count;) {
+ DCHECK(spec);
FeedbackSlotKind kind = spec->GetKind(FeedbackSlot(i));
int entry_size = FeedbackMetadata::GetSlotSize(kind);
for (int j = 1; j < entry_size; j++) {
@@ -116,6 +99,7 @@ Handle<FeedbackMetadata> FeedbackMetadata::New(Isolate* isolate,
Handle<FeedbackMetadata> metadata = Handle<FeedbackMetadata>::cast(array);
for (int i = 0; i < slot_count; i++) {
+ DCHECK(spec);
FeedbackSlot slot(i);
FeedbackSlotKind kind = spec->GetKind(slot);
metadata->SetKind(slot, kind);
@@ -266,7 +250,7 @@ Handle<FeedbackVector> FeedbackVector::New(Isolate* isolate,
vector->set(index, Smi::kZero, SKIP_WRITE_BARRIER);
break;
case FeedbackSlotKind::kCreateClosure: {
- Handle<Cell> cell = factory->NewNoClosuresCell(undefined_value);
+ Handle<FeedbackCell> cell = factory->NewNoClosuresCell(undefined_value);
vector->set(index, *cell);
break;
}
@@ -392,110 +376,11 @@ bool FeedbackVector::ClearSlots(Isolate* isolate) {
FeedbackMetadataIterator iter(metadata());
while (iter.HasNext()) {
FeedbackSlot slot = iter.Next();
- FeedbackSlotKind kind = iter.kind();
Object* obj = Get(slot);
if (obj != uninitialized_sentinel) {
- switch (kind) {
- case FeedbackSlotKind::kCall: {
- CallICNexus nexus(this, slot);
- if (!nexus.IsCleared()) {
- nexus.Clear();
- feedback_updated = true;
- }
- break;
- }
- case FeedbackSlotKind::kLoadProperty: {
- LoadICNexus nexus(this, slot);
- if (!nexus.IsCleared()) {
- nexus.Clear();
- feedback_updated = true;
- }
- break;
- }
- case FeedbackSlotKind::kLoadGlobalInsideTypeof:
- case FeedbackSlotKind::kLoadGlobalNotInsideTypeof: {
- LoadGlobalICNexus nexus(this, slot);
- if (!nexus.IsCleared()) {
- nexus.Clear();
- feedback_updated = true;
- }
- break;
- }
- case FeedbackSlotKind::kLoadKeyed: {
- KeyedLoadICNexus nexus(this, slot);
- if (!nexus.IsCleared()) {
- nexus.Clear();
- feedback_updated = true;
- }
- break;
- }
- case FeedbackSlotKind::kStoreNamedSloppy:
- case FeedbackSlotKind::kStoreNamedStrict:
- case FeedbackSlotKind::kStoreOwnNamed: {
- StoreICNexus nexus(this, slot);
- if (!nexus.IsCleared()) {
- nexus.Clear();
- feedback_updated = true;
- }
- break;
- }
- case FeedbackSlotKind::kStoreGlobalSloppy:
- case FeedbackSlotKind::kStoreGlobalStrict: {
- StoreGlobalICNexus nexus(this, slot);
- if (!nexus.IsCleared()) {
- nexus.Clear();
- feedback_updated = true;
- }
- break;
- }
- case FeedbackSlotKind::kStoreKeyedSloppy:
- case FeedbackSlotKind::kStoreKeyedStrict: {
- KeyedStoreICNexus nexus(this, slot);
- if (!nexus.IsCleared()) {
- nexus.Clear();
- feedback_updated = true;
- }
- break;
- }
- case FeedbackSlotKind::kForIn:
- case FeedbackSlotKind::kBinaryOp:
- case FeedbackSlotKind::kCompareOp: {
- DCHECK(Get(slot)->IsSmi());
- // don't clear these smi slots.
- // Set(slot, Smi::kZero);
- break;
- }
- case FeedbackSlotKind::kInstanceOf: {
- InstanceOfICNexus nexus(this, slot);
- if (!nexus.IsCleared()) {
- nexus.Clear();
- feedback_updated = true;
- }
- break;
- }
- case FeedbackSlotKind::kCreateClosure:
- case FeedbackSlotKind::kTypeProfile: {
- break;
- }
- case FeedbackSlotKind::kLiteral: {
- Set(slot, Smi::kZero, SKIP_WRITE_BARRIER);
- feedback_updated = true;
- break;
- }
- case FeedbackSlotKind::kStoreDataPropertyInLiteral: {
- StoreDataPropertyInLiteralICNexus nexus(this, slot);
- if (!nexus.IsCleared()) {
- nexus.Clear();
- feedback_updated = true;
- }
- break;
- }
- case FeedbackSlotKind::kInvalid:
- case FeedbackSlotKind::kKindsNumber:
- UNREACHABLE();
- break;
- }
+ FeedbackNexus nexus(this, slot);
+ feedback_updated |= nexus.Clear();
}
}
return feedback_updated;
@@ -526,10 +411,92 @@ Handle<FixedArray> FeedbackNexus::EnsureExtraArrayOfSize(int length) {
}
void FeedbackNexus::ConfigureUninitialized() {
- SetFeedback(*FeedbackVector::UninitializedSentinel(GetIsolate()),
- SKIP_WRITE_BARRIER);
- SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(GetIsolate()),
- SKIP_WRITE_BARRIER);
+ Isolate* isolate = GetIsolate();
+ switch (kind()) {
+ case FeedbackSlotKind::kStoreGlobalSloppy:
+ case FeedbackSlotKind::kStoreGlobalStrict:
+ case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+ case FeedbackSlotKind::kLoadGlobalInsideTypeof: {
+ SetFeedback(isolate->heap()->empty_weak_cell(), SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
+ break;
+ }
+ case FeedbackSlotKind::kCall: {
+ SetFeedback(*FeedbackVector::UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(Smi::kZero, SKIP_WRITE_BARRIER);
+ break;
+ }
+ case FeedbackSlotKind::kInstanceOf: {
+ SetFeedback(*FeedbackVector::UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
+ break;
+ }
+ case FeedbackSlotKind::kStoreDataPropertyInLiteral: {
+ SetFeedback(*FeedbackVector::UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+bool FeedbackNexus::Clear() {
+ bool feedback_updated = false;
+
+ switch (kind()) {
+ case FeedbackSlotKind::kCreateClosure:
+ case FeedbackSlotKind::kTypeProfile:
+ // We don't clear these kinds ever.
+ break;
+
+ case FeedbackSlotKind::kCompareOp:
+ case FeedbackSlotKind::kForIn:
+ case FeedbackSlotKind::kBinaryOp:
+ // We don't clear these, either.
+ break;
+
+ case FeedbackSlotKind::kLiteral:
+ SetFeedback(Smi::kZero, SKIP_WRITE_BARRIER);
+ feedback_updated = true;
+ break;
+
+ case FeedbackSlotKind::kStoreNamedSloppy:
+ case FeedbackSlotKind::kStoreNamedStrict:
+ case FeedbackSlotKind::kStoreKeyedSloppy:
+ case FeedbackSlotKind::kStoreKeyedStrict:
+ case FeedbackSlotKind::kStoreOwnNamed:
+ case FeedbackSlotKind::kLoadProperty:
+ case FeedbackSlotKind::kLoadKeyed:
+ if (!IsCleared()) {
+ ConfigurePremonomorphic();
+ feedback_updated = true;
+ }
+ break;
+
+ case FeedbackSlotKind::kStoreGlobalSloppy:
+ case FeedbackSlotKind::kStoreGlobalStrict:
+ case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+ case FeedbackSlotKind::kLoadGlobalInsideTypeof:
+ case FeedbackSlotKind::kCall:
+ case FeedbackSlotKind::kInstanceOf:
+ case FeedbackSlotKind::kStoreDataPropertyInLiteral:
+ if (!IsCleared()) {
+ ConfigureUninitialized();
+ feedback_updated = true;
+ }
+ break;
+
+ case FeedbackSlotKind::kInvalid:
+ case FeedbackSlotKind::kKindsNumber:
+ UNREACHABLE();
+ break;
+ }
+ return feedback_updated;
}
void FeedbackNexus::ConfigurePremonomorphic() {
@@ -557,70 +524,146 @@ bool FeedbackNexus::ConfigureMegamorphic(IcCheckType property_type) {
return changed;
}
-InlineCacheState LoadICNexus::StateFromFeedback() const {
+InlineCacheState FeedbackNexus::StateFromFeedback() const {
Isolate* isolate = GetIsolate();
Object* feedback = GetFeedback();
- if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
- return UNINITIALIZED;
- } else if (feedback == *FeedbackVector::MegamorphicSentinel(isolate)) {
- return MEGAMORPHIC;
- } else if (feedback == *FeedbackVector::PremonomorphicSentinel(isolate)) {
- return PREMONOMORPHIC;
- } else if (feedback->IsFixedArray()) {
- // Determine state purely by our structure, don't check if the maps are
- // cleared.
- return POLYMORPHIC;
- } else if (feedback->IsWeakCell()) {
- // Don't check if the map is cleared.
- return MONOMORPHIC;
- }
+ switch (kind()) {
+ case FeedbackSlotKind::kCreateClosure:
+ case FeedbackSlotKind::kLiteral:
+ // CreateClosure and literal slots don't have a notion of state.
+ UNREACHABLE();
+ break;
- return UNINITIALIZED;
-}
+ case FeedbackSlotKind::kStoreGlobalSloppy:
+ case FeedbackSlotKind::kStoreGlobalStrict:
+ case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+ case FeedbackSlotKind::kLoadGlobalInsideTypeof: {
+ if (feedback->IsSmi()) return MONOMORPHIC;
-InlineCacheState KeyedLoadICNexus::StateFromFeedback() const {
- Isolate* isolate = GetIsolate();
- Object* feedback = GetFeedback();
+ Object* extra = GetFeedbackExtra();
+ if (!WeakCell::cast(feedback)->cleared() ||
+ extra != *FeedbackVector::UninitializedSentinel(isolate)) {
+ return MONOMORPHIC;
+ }
+ return UNINITIALIZED;
+ }
+ case FeedbackSlotKind::kStoreNamedSloppy:
+ case FeedbackSlotKind::kStoreNamedStrict:
+ case FeedbackSlotKind::kStoreKeyedSloppy:
+ case FeedbackSlotKind::kStoreKeyedStrict:
+ case FeedbackSlotKind::kStoreOwnNamed:
+ case FeedbackSlotKind::kLoadProperty:
+ case FeedbackSlotKind::kLoadKeyed: {
+ if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
+ return UNINITIALIZED;
+ }
+ if (feedback == *FeedbackVector::MegamorphicSentinel(isolate)) {
+ return MEGAMORPHIC;
+ }
+ if (feedback == *FeedbackVector::PremonomorphicSentinel(isolate)) {
+ return PREMONOMORPHIC;
+ }
+ if (feedback->IsFixedArray()) {
+ // Determine state purely by our structure, don't check if the maps are
+ // cleared.
+ return POLYMORPHIC;
+ }
+ if (feedback->IsWeakCell()) {
+ // Don't check if the map is cleared.
+ return MONOMORPHIC;
+ }
+ if (feedback->IsName()) {
+ DCHECK(IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()));
+ Object* extra = GetFeedbackExtra();
+ FixedArray* extra_array = FixedArray::cast(extra);
+ return extra_array->length() > 2 ? POLYMORPHIC : MONOMORPHIC;
+ }
+ UNREACHABLE();
+ }
+ case FeedbackSlotKind::kCall: {
+ if (feedback == *FeedbackVector::MegamorphicSentinel(isolate)) {
+ return GENERIC;
+ } else if (feedback->IsAllocationSite() || feedback->IsWeakCell()) {
+ return MONOMORPHIC;
+ }
- if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
- return UNINITIALIZED;
- } else if (feedback == *FeedbackVector::PremonomorphicSentinel(isolate)) {
- return PREMONOMORPHIC;
- } else if (feedback == *FeedbackVector::MegamorphicSentinel(isolate)) {
- return MEGAMORPHIC;
- } else if (feedback->IsFixedArray()) {
- // Determine state purely by our structure, don't check if the maps are
- // cleared.
- return POLYMORPHIC;
- } else if (feedback->IsWeakCell()) {
- // Don't check if the map is cleared.
- return MONOMORPHIC;
- } else if (feedback->IsName()) {
- Object* extra = GetFeedbackExtra();
- FixedArray* extra_array = FixedArray::cast(extra);
- return extra_array->length() > 2 ? POLYMORPHIC : MONOMORPHIC;
- }
+ CHECK(feedback == *FeedbackVector::UninitializedSentinel(isolate));
+ return UNINITIALIZED;
+ }
+ case FeedbackSlotKind::kBinaryOp: {
+ BinaryOperationHint hint = GetBinaryOperationFeedback();
+ if (hint == BinaryOperationHint::kNone) {
+ return UNINITIALIZED;
+ } else if (hint == BinaryOperationHint::kAny) {
+ return GENERIC;
+ }
- return UNINITIALIZED;
-}
+ return MONOMORPHIC;
+ }
+ case FeedbackSlotKind::kCompareOp: {
+ CompareOperationHint hint = GetCompareOperationFeedback();
+ if (hint == CompareOperationHint::kNone) {
+ return UNINITIALIZED;
+ } else if (hint == CompareOperationHint::kAny) {
+ return GENERIC;
+ }
-void GlobalICNexus::ConfigureUninitialized() {
- Isolate* isolate = GetIsolate();
- SetFeedback(isolate->heap()->empty_weak_cell(), SKIP_WRITE_BARRIER);
- SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
- SKIP_WRITE_BARRIER);
+ return MONOMORPHIC;
+ }
+ case FeedbackSlotKind::kForIn: {
+ ForInHint hint = GetForInFeedback();
+ if (hint == ForInHint::kNone) {
+ return UNINITIALIZED;
+ } else if (hint == ForInHint::kAny) {
+ return GENERIC;
+ }
+ return MONOMORPHIC;
+ }
+ case FeedbackSlotKind::kInstanceOf: {
+ if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
+ return UNINITIALIZED;
+ } else if (feedback == *FeedbackVector::MegamorphicSentinel(isolate)) {
+ return MEGAMORPHIC;
+ }
+ return MONOMORPHIC;
+ }
+ case FeedbackSlotKind::kStoreDataPropertyInLiteral: {
+ if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
+ return UNINITIALIZED;
+ } else if (feedback->IsWeakCell()) {
+ // Don't check if the map is cleared.
+ return MONOMORPHIC;
+ }
+
+ return MEGAMORPHIC;
+ }
+ case FeedbackSlotKind::kTypeProfile: {
+ if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
+ return UNINITIALIZED;
+ }
+ return MONOMORPHIC;
+ }
+
+ case FeedbackSlotKind::kInvalid:
+ case FeedbackSlotKind::kKindsNumber:
+ UNREACHABLE();
+ break;
+ }
+ return UNINITIALIZED;
}
-void GlobalICNexus::ConfigurePropertyCellMode(Handle<PropertyCell> cell) {
+void FeedbackNexus::ConfigurePropertyCellMode(Handle<PropertyCell> cell) {
+ DCHECK(IsGlobalICKind(kind()));
Isolate* isolate = GetIsolate();
SetFeedback(*isolate->factory()->NewWeakCell(cell));
SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
SKIP_WRITE_BARRIER);
}
-bool GlobalICNexus::ConfigureLexicalVarMode(int script_context_index,
+bool FeedbackNexus::ConfigureLexicalVarMode(int script_context_index,
int context_slot_index) {
+ DCHECK(IsGlobalICKind(kind()));
DCHECK_LE(0, script_context_index);
DCHECK_LE(0, context_slot_index);
if (!ContextIndexBits::is_valid(script_context_index) ||
@@ -637,112 +680,44 @@ bool GlobalICNexus::ConfigureLexicalVarMode(int script_context_index,
return true;
}
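ConfigureLexicalVarMode above packs a script-context index and a context-slot index into one Smi, rejecting values that do not fit the bit fields declared in the header (ContextIndexBits is 12 bits there; the width of the slot-index field is not visible in this section). A standalone sketch of that encoding, with the slot-index width and the field order assumed for illustration:

// Illustrative encoding used by ConfigureLexicalVarMode: context index in the
// low bits, slot index above it. kSlotIndexBits = 18 is an assumption; only
// the 12-bit context-index width appears in this diff.
#include <cstdint>

constexpr int kContextIndexBits = 12;  // from LEXICAL_MODE_BIT_FIELDS
constexpr int kSlotIndexBits = 18;     // assumed width for the sketch

bool EncodeLexicalVar(int context_index, int slot_index, uint32_t* out) {
  if (context_index < 0 || context_index >= (1 << kContextIndexBits)) return false;
  if (slot_index < 0 || slot_index >= (1 << kSlotIndexBits)) return false;
  *out = static_cast<uint32_t>(context_index) |
         (static_cast<uint32_t>(slot_index) << kContextIndexBits);
  return true;  // caller stores *out as a Smi in the feedback slot
}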
-void GlobalICNexus::ConfigureHandlerMode(Handle<Object> handler) {
+void FeedbackNexus::ConfigureHandlerMode(Handle<Object> handler) {
+ DCHECK(IsGlobalICKind(kind()));
SetFeedback(GetIsolate()->heap()->empty_weak_cell());
SetFeedbackExtra(*handler);
}
-InlineCacheState GlobalICNexus::StateFromFeedback() const {
- Isolate* isolate = GetIsolate();
- Object* feedback = GetFeedback();
- if (feedback->IsSmi()) return MONOMORPHIC;
-
- Object* extra = GetFeedbackExtra();
- if (!WeakCell::cast(feedback)->cleared() ||
- extra != *FeedbackVector::UninitializedSentinel(isolate)) {
- return MONOMORPHIC;
- }
- return UNINITIALIZED;
-}
-
-InlineCacheState StoreICNexus::StateFromFeedback() const {
- Isolate* isolate = GetIsolate();
- Object* feedback = GetFeedback();
-
- if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
- return UNINITIALIZED;
- } else if (feedback == *FeedbackVector::MegamorphicSentinel(isolate)) {
- return MEGAMORPHIC;
- } else if (feedback == *FeedbackVector::PremonomorphicSentinel(isolate)) {
- return PREMONOMORPHIC;
- } else if (feedback->IsFixedArray()) {
- // Determine state purely by our structure, don't check if the maps are
- // cleared.
- return POLYMORPHIC;
- } else if (feedback->IsWeakCell()) {
- // Don't check if the map is cleared.
- return MONOMORPHIC;
- }
-
- return UNINITIALIZED;
-}
-
-InlineCacheState KeyedStoreICNexus::StateFromFeedback() const {
- Isolate* isolate = GetIsolate();
- Object* feedback = GetFeedback();
-
- if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
- return UNINITIALIZED;
- } else if (feedback == *FeedbackVector::PremonomorphicSentinel(isolate)) {
- return PREMONOMORPHIC;
- } else if (feedback == *FeedbackVector::MegamorphicSentinel(isolate)) {
- return MEGAMORPHIC;
- } else if (feedback->IsFixedArray()) {
- // Determine state purely by our structure, don't check if the maps are
- // cleared.
- return POLYMORPHIC;
- } else if (feedback->IsWeakCell()) {
- // Don't check if the map is cleared.
- return MONOMORPHIC;
- } else if (feedback->IsName()) {
- Object* extra = GetFeedbackExtra();
- FixedArray* extra_array = FixedArray::cast(extra);
- return extra_array->length() > 2 ? POLYMORPHIC : MONOMORPHIC;
- }
-
- return UNINITIALIZED;
-}
-
-InlineCacheState CallICNexus::StateFromFeedback() const {
- Isolate* isolate = GetIsolate();
- Object* feedback = GetFeedback();
- DCHECK(GetFeedbackExtra() ==
- *FeedbackVector::UninitializedSentinel(isolate) ||
- GetFeedbackExtra()->IsSmi());
-
- if (feedback == *FeedbackVector::MegamorphicSentinel(isolate)) {
- return GENERIC;
- } else if (feedback->IsAllocationSite() || feedback->IsWeakCell()) {
- return MONOMORPHIC;
- }
+int FeedbackNexus::GetCallCount() {
+ DCHECK(IsCallICKind(kind()));
- CHECK(feedback == *FeedbackVector::UninitializedSentinel(isolate));
- return UNINITIALIZED;
-}
-
-int CallICNexus::GetCallCount() {
Object* call_count = GetFeedbackExtra();
CHECK(call_count->IsSmi());
uint32_t value = static_cast<uint32_t>(Smi::ToInt(call_count));
return CallCountField::decode(value);
}
-void CallICNexus::SetSpeculationMode(SpeculationMode mode) {
+void FeedbackNexus::SetSpeculationMode(SpeculationMode mode) {
+ DCHECK(IsCallICKind(kind()));
+
Object* call_count = GetFeedbackExtra();
CHECK(call_count->IsSmi());
- uint32_t value = static_cast<uint32_t>(Smi::ToInt(call_count));
- int result = static_cast<int>(CallCountField::decode(value) |
- SpeculationModeField::encode(mode));
+ uint32_t count = static_cast<uint32_t>(Smi::ToInt(call_count));
+ uint32_t value = CallCountField::encode(CallCountField::decode(count));
+ int result = static_cast<int>(value | SpeculationModeField::encode(mode));
SetFeedbackExtra(Smi::FromInt(result), SKIP_WRITE_BARRIER);
}
-SpeculationMode CallICNexus::GetSpeculationMode() {
+SpeculationMode FeedbackNexus::GetSpeculationMode() {
+ DCHECK(IsCallICKind(kind()));
+
Object* call_count = GetFeedbackExtra();
CHECK(call_count->IsSmi());
uint32_t value = static_cast<uint32_t>(Smi::ToInt(call_count));
return SpeculationModeField::decode(value);
}
-float CallICNexus::ComputeCallFrequency() {
+
+float FeedbackNexus::ComputeCallFrequency() {
+ DCHECK(IsCallICKind(kind()));
+
double const invocation_count = vector()->invocation_count();
double const call_count = GetCallCount();
if (invocation_count == 0) {
@@ -752,25 +727,23 @@ float CallICNexus::ComputeCallFrequency() {
return static_cast<float>(call_count / invocation_count);
}
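ComputeCallFrequency above divides the slot's call count by the vector-wide invocation count; the early-return value for a zero invocation count falls outside this hunk. Restated as plain arithmetic (the zero-case result here is an assumption):

// call frequency = calls recorded at this site / invocations of the function.
float CallFrequency(double call_count, double invocation_count) {
  if (invocation_count == 0) return 0.0f;  // assumed zero-case result
  return static_cast<float>(call_count / invocation_count);
}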
-void CallICNexus::ConfigureUninitialized() {
- Isolate* isolate = GetIsolate();
- SetFeedback(*FeedbackVector::UninitializedSentinel(isolate),
- SKIP_WRITE_BARRIER);
- SetFeedbackExtra(Smi::kZero, SKIP_WRITE_BARRIER);
-}
-
void FeedbackNexus::ConfigureMonomorphic(Handle<Name> name,
Handle<Map> receiver_map,
Handle<Object> handler) {
Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- if (name.is_null()) {
+ if (kind() == FeedbackSlotKind::kStoreDataPropertyInLiteral) {
SetFeedback(*cell);
- SetFeedbackExtra(*handler);
+ SetFeedbackExtra(*name);
} else {
- Handle<FixedArray> array = EnsureExtraArrayOfSize(2);
- SetFeedback(*name);
- array->set(0, *cell);
- array->set(1, *handler);
+ if (name.is_null()) {
+ SetFeedback(*cell);
+ SetFeedbackExtra(*handler);
+ } else {
+ Handle<FixedArray> array = EnsureExtraArrayOfSize(2);
+ SetFeedback(*name);
+ array->set(0, *cell);
+ array->set(1, *handler);
+ }
}
}
@@ -798,6 +771,10 @@ void FeedbackNexus::ConfigurePolymorphic(Handle<Name> name,
}
int FeedbackNexus::ExtractMaps(MapHandles* maps) const {
+ DCHECK(IsLoadICKind(kind()) || IsStoreICKind(kind()) ||
+ IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) ||
+ IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()));
+
Isolate* isolate = GetIsolate();
Object* feedback = GetFeedback();
bool is_named_feedback = IsPropertyNameFeedback(feedback);
@@ -831,6 +808,10 @@ int FeedbackNexus::ExtractMaps(MapHandles* maps) const {
}
MaybeHandle<Object> FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
+ DCHECK(IsLoadICKind(kind()) || IsStoreICKind(kind()) ||
+ IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) ||
+ IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()));
+
Object* feedback = GetFeedback();
Isolate* isolate = GetIsolate();
bool is_named_feedback = IsPropertyNameFeedback(feedback);
@@ -868,6 +849,10 @@ MaybeHandle<Object> FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
}
bool FeedbackNexus::FindHandlers(ObjectHandles* code_list, int length) const {
+ DCHECK(IsLoadICKind(kind()) || IsStoreICKind(kind()) ||
+ IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) ||
+ IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()));
+
Object* feedback = GetFeedback();
Isolate* isolate = GetIsolate();
int count = 0;
@@ -901,23 +886,18 @@ bool FeedbackNexus::FindHandlers(ObjectHandles* code_list, int length) const {
return count == length;
}
-Name* KeyedLoadICNexus::FindFirstName() const {
- Object* feedback = GetFeedback();
- if (IsPropertyNameFeedback(feedback)) {
- return Name::cast(feedback);
- }
- return nullptr;
-}
-
-Name* KeyedStoreICNexus::FindFirstName() const {
- Object* feedback = GetFeedback();
- if (IsPropertyNameFeedback(feedback)) {
- return Name::cast(feedback);
+Name* FeedbackNexus::FindFirstName() const {
+ if (IsKeyedStoreICKind(kind()) || IsKeyedLoadICKind(kind())) {
+ Object* feedback = GetFeedback();
+ if (IsPropertyNameFeedback(feedback)) {
+ return Name::cast(feedback);
+ }
}
return nullptr;
}
-KeyedAccessLoadMode KeyedLoadICNexus::GetKeyedAccessLoadMode() const {
+KeyedAccessLoadMode FeedbackNexus::GetKeyedAccessLoadMode() const {
+ DCHECK(IsKeyedLoadICKind(kind()));
MapHandles maps;
ObjectHandles handlers;
@@ -933,7 +913,8 @@ KeyedAccessLoadMode KeyedLoadICNexus::GetKeyedAccessLoadMode() const {
return STANDARD_LOAD;
}
-KeyedAccessStoreMode KeyedStoreICNexus::GetKeyedAccessStoreMode() const {
+KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const {
+ DCHECK(IsKeyedStoreICKind(kind()));
KeyedAccessStoreMode mode = STANDARD_STORE;
MapHandles maps;
ObjectHandles handlers;
@@ -974,7 +955,8 @@ KeyedAccessStoreMode KeyedStoreICNexus::GetKeyedAccessStoreMode() const {
return mode;
}
-IcCheckType KeyedLoadICNexus::GetKeyType() const {
+IcCheckType FeedbackNexus::GetKeyType() const {
+ DCHECK(IsKeyedStoreICKind(kind()) || IsKeyedLoadICKind(kind()));
Object* feedback = GetFeedback();
if (feedback == *FeedbackVector::MegamorphicSentinel(GetIsolate())) {
return static_cast<IcCheckType>(Smi::ToInt(GetFeedbackExtra()));
@@ -982,79 +964,31 @@ IcCheckType KeyedLoadICNexus::GetKeyType() const {
return IsPropertyNameFeedback(feedback) ? PROPERTY : ELEMENT;
}
-IcCheckType KeyedStoreICNexus::GetKeyType() const {
- Object* feedback = GetFeedback();
- if (feedback == *FeedbackVector::MegamorphicSentinel(GetIsolate())) {
- return static_cast<IcCheckType>(Smi::ToInt(GetFeedbackExtra()));
- }
- return IsPropertyNameFeedback(feedback) ? PROPERTY : ELEMENT;
-}
-
-InlineCacheState BinaryOpICNexus::StateFromFeedback() const {
- BinaryOperationHint hint = GetBinaryOperationFeedback();
- if (hint == BinaryOperationHint::kNone) {
- return UNINITIALIZED;
- } else if (hint == BinaryOperationHint::kAny) {
- return GENERIC;
- }
-
- return MONOMORPHIC;
-}
-
-InlineCacheState CompareICNexus::StateFromFeedback() const {
- CompareOperationHint hint = GetCompareOperationFeedback();
- if (hint == CompareOperationHint::kNone) {
- return UNINITIALIZED;
- } else if (hint == CompareOperationHint::kAny) {
- return GENERIC;
- }
-
- return MONOMORPHIC;
-}
-
-BinaryOperationHint BinaryOpICNexus::GetBinaryOperationFeedback() const {
+BinaryOperationHint FeedbackNexus::GetBinaryOperationFeedback() const {
+ DCHECK_EQ(kind(), FeedbackSlotKind::kBinaryOp);
int feedback = Smi::ToInt(GetFeedback());
return BinaryOperationHintFromFeedback(feedback);
}
-CompareOperationHint CompareICNexus::GetCompareOperationFeedback() const {
+CompareOperationHint FeedbackNexus::GetCompareOperationFeedback() const {
+ DCHECK_EQ(kind(), FeedbackSlotKind::kCompareOp);
int feedback = Smi::ToInt(GetFeedback());
return CompareOperationHintFromFeedback(feedback);
}
-InlineCacheState ForInICNexus::StateFromFeedback() const {
- ForInHint hint = GetForInFeedback();
- if (hint == ForInHint::kNone) {
- return UNINITIALIZED;
- } else if (hint == ForInHint::kAny) {
- return GENERIC;
- }
- return MONOMORPHIC;
-}
-
-ForInHint ForInICNexus::GetForInFeedback() const {
+ForInHint FeedbackNexus::GetForInFeedback() const {
+ DCHECK_EQ(kind(), FeedbackSlotKind::kForIn);
int feedback = Smi::ToInt(GetFeedback());
return ForInHintFromFeedback(feedback);
}
-void InstanceOfICNexus::ConfigureUninitialized() {
- SetFeedback(*FeedbackVector::UninitializedSentinel(GetIsolate()),
- SKIP_WRITE_BARRIER);
+Handle<FeedbackCell> FeedbackNexus::GetFeedbackCell() const {
+ DCHECK_EQ(FeedbackSlotKind::kCreateClosure, kind());
+ return handle(FeedbackCell::cast(GetFeedback()));
}
-InlineCacheState InstanceOfICNexus::StateFromFeedback() const {
- Isolate* isolate = GetIsolate();
- Object* feedback = GetFeedback();
-
- if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
- return UNINITIALIZED;
- } else if (feedback == *FeedbackVector::MegamorphicSentinel(isolate)) {
- return MEGAMORPHIC;
- }
- return MONOMORPHIC;
-}
-
-MaybeHandle<JSObject> InstanceOfICNexus::GetConstructorFeedback() const {
+MaybeHandle<JSObject> FeedbackNexus::GetConstructorFeedback() const {
+ DCHECK_EQ(kind(), FeedbackSlotKind::kInstanceOf);
Isolate* isolate = GetIsolate();
Object* feedback = GetFeedback();
if (feedback->IsWeakCell() && !WeakCell::cast(feedback)->cleared()) {
@@ -1063,38 +997,6 @@ MaybeHandle<JSObject> InstanceOfICNexus::GetConstructorFeedback() const {
return MaybeHandle<JSObject>();
}
-InlineCacheState StoreDataPropertyInLiteralICNexus::StateFromFeedback() const {
- Isolate* isolate = GetIsolate();
- Object* feedback = GetFeedback();
-
- if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
- return UNINITIALIZED;
- } else if (feedback->IsWeakCell()) {
- // Don't check if the map is cleared.
- return MONOMORPHIC;
- }
-
- return MEGAMORPHIC;
-}
-
-void StoreDataPropertyInLiteralICNexus::ConfigureMonomorphic(
- Handle<Name> name, Handle<Map> receiver_map) {
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
-
- SetFeedback(*cell);
- SetFeedbackExtra(*name);
-}
-
-InlineCacheState CollectTypeProfileNexus::StateFromFeedback() const {
- Isolate* isolate = GetIsolate();
- Object* const feedback = GetFeedback();
-
- if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
- return UNINITIALIZED;
- }
- return MONOMORPHIC;
-}
-
namespace {
bool InList(Handle<ArrayList> types, Handle<String> type) {
@@ -1108,44 +1010,42 @@ bool InList(Handle<ArrayList> types, Handle<String> type) {
}
} // anonymous namespace
-void CollectTypeProfileNexus::Collect(Handle<String> type, int position) {
+void FeedbackNexus::Collect(Handle<String> type, int position) {
+ DCHECK(IsTypeProfileKind(kind()));
DCHECK_GE(position, 0);
Isolate* isolate = GetIsolate();
Object* const feedback = GetFeedback();
// Map source position to collection of types
- Handle<NumberDictionary> types;
+ Handle<SimpleNumberDictionary> types;
if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
- types = NumberDictionary::New(isolate, 1);
+ types = SimpleNumberDictionary::New(isolate, 1);
} else {
- types = handle(NumberDictionary::cast(feedback));
+ types = handle(SimpleNumberDictionary::cast(feedback));
}
Handle<ArrayList> position_specific_types;
int entry = types->FindEntry(position);
- if (entry == NumberDictionary::kNotFound) {
+ if (entry == SimpleNumberDictionary::kNotFound) {
position_specific_types = ArrayList::New(isolate, 1);
- types = NumberDictionary::Set(
+ types = SimpleNumberDictionary::Set(
types, position, ArrayList::Add(position_specific_types, type));
} else {
DCHECK(types->ValueAt(entry)->IsArrayList());
position_specific_types = handle(ArrayList::cast(types->ValueAt(entry)));
if (!InList(position_specific_types, type)) { // Add type
- types = NumberDictionary::Set(
+ types = SimpleNumberDictionary::Set(
types, position, ArrayList::Add(position_specific_types, type));
}
}
SetFeedback(*types);
}
-void CollectTypeProfileNexus::Clear() {
- SetFeedback(*FeedbackVector::UninitializedSentinel(GetIsolate()));
-}
-
-std::vector<int> CollectTypeProfileNexus::GetSourcePositions() const {
+std::vector<int> FeedbackNexus::GetSourcePositions() const {
+ DCHECK(IsTypeProfileKind(kind()));
std::vector<int> source_positions;
Isolate* isolate = GetIsolate();
@@ -1155,12 +1055,12 @@ std::vector<int> CollectTypeProfileNexus::GetSourcePositions() const {
return source_positions;
}
- Handle<NumberDictionary> types =
- Handle<NumberDictionary>(NumberDictionary::cast(feedback), isolate);
+ Handle<SimpleNumberDictionary> types = Handle<SimpleNumberDictionary>(
+ SimpleNumberDictionary::cast(feedback), isolate);
- for (int index = NumberDictionary::kElementsStartIndex;
- index < types->length(); index += NumberDictionary::kEntrySize) {
- int key_index = index + NumberDictionary::kEntryKeyIndex;
+ for (int index = SimpleNumberDictionary::kElementsStartIndex;
+ index < types->length(); index += SimpleNumberDictionary::kEntrySize) {
+ int key_index = index + SimpleNumberDictionary::kEntryKeyIndex;
Object* key = types->get(key_index);
if (key->IsSmi()) {
int position = Smi::cast(key)->value();
@@ -1170,8 +1070,9 @@ std::vector<int> CollectTypeProfileNexus::GetSourcePositions() const {
return source_positions;
}
-std::vector<Handle<String>> CollectTypeProfileNexus::GetTypesForSourcePositions(
+std::vector<Handle<String>> FeedbackNexus::GetTypesForSourcePositions(
uint32_t position) const {
+ DCHECK(IsTypeProfileKind(kind()));
Isolate* isolate = GetIsolate();
Object* const feedback = GetFeedback();
@@ -1180,11 +1081,11 @@ std::vector<Handle<String>> CollectTypeProfileNexus::GetTypesForSourcePositions(
return types_for_position;
}
- Handle<NumberDictionary> types =
- Handle<NumberDictionary>(NumberDictionary::cast(feedback), isolate);
+ Handle<SimpleNumberDictionary> types = Handle<SimpleNumberDictionary>(
+ SimpleNumberDictionary::cast(feedback), isolate);
int entry = types->FindEntry(position);
- if (entry == NumberDictionary::kNotFound) {
+ if (entry == SimpleNumberDictionary::kNotFound) {
return types_for_position;
}
DCHECK(types->ValueAt(entry)->IsArrayList());
@@ -1201,16 +1102,17 @@ std::vector<Handle<String>> CollectTypeProfileNexus::GetTypesForSourcePositions(
namespace {
Handle<JSObject> ConvertToJSObject(Isolate* isolate,
- Handle<NumberDictionary> feedback) {
+ Handle<SimpleNumberDictionary> feedback) {
Handle<JSObject> type_profile =
isolate->factory()->NewJSObject(isolate->object_function());
- for (int index = NumberDictionary::kElementsStartIndex;
- index < feedback->length(); index += NumberDictionary::kEntrySize) {
- int key_index = index + NumberDictionary::kEntryKeyIndex;
+ for (int index = SimpleNumberDictionary::kElementsStartIndex;
+ index < feedback->length();
+ index += SimpleNumberDictionary::kEntrySize) {
+ int key_index = index + SimpleNumberDictionary::kEntryKeyIndex;
Object* key = feedback->get(key_index);
if (key->IsSmi()) {
- int value_index = index + NumberDictionary::kEntryValueIndex;
+ int value_index = index + SimpleNumberDictionary::kEntryValueIndex;
Handle<ArrayList> position_specific_types(
ArrayList::cast(feedback->get(value_index)));
@@ -1228,7 +1130,8 @@ Handle<JSObject> ConvertToJSObject(Isolate* isolate,
}
} // namespace
-JSObject* CollectTypeProfileNexus::GetTypeProfile() const {
+JSObject* FeedbackNexus::GetTypeProfile() const {
+ DCHECK(IsTypeProfileKind(kind()));
Isolate* isolate = GetIsolate();
Object* const feedback = GetFeedback();
@@ -1237,7 +1140,13 @@ JSObject* CollectTypeProfileNexus::GetTypeProfile() const {
return *isolate->factory()->NewJSObject(isolate->object_function());
}
- return *ConvertToJSObject(isolate, handle(NumberDictionary::cast(feedback)));
+ return *ConvertToJSObject(isolate,
+ handle(SimpleNumberDictionary::cast(feedback)));
+}
+
+void FeedbackNexus::ResetTypeProfile() {
+ DCHECK(IsTypeProfileKind(kind()));
+ SetFeedback(*FeedbackVector::UninitializedSentinel(GetIsolate()));
}
} // namespace internal
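Collect above keys a SimpleNumberDictionary by source position and appends each new type string to the ArrayList stored for that position, skipping duplicates via InList. The same bookkeeping restated with standard containers, purely as an illustration of the data shape (not V8 code):

#include <map>
#include <string>
#include <vector>

// Source position -> distinct type names observed at that position, mirroring
// the SimpleNumberDictionary-of-ArrayList layout used by FeedbackNexus::Collect.
void CollectType(std::map<int, std::vector<std::string>>* types,
                 const std::string& type, int position) {
  std::vector<std::string>& list = (*types)[position];  // created if missing
  for (const std::string& t : list) {
    if (t == type) return;  // duplicate: nothing to record
  }
  list.push_back(type);
}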
diff --git a/deps/v8/src/feedback-vector.h b/deps/v8/src/feedback-vector.h
index 9f8096d138..8faff32649 100644
--- a/deps/v8/src/feedback-vector.h
+++ b/deps/v8/src/feedback-vector.h
@@ -85,6 +85,10 @@ inline bool IsStoreOwnICKind(FeedbackSlotKind kind) {
return kind == FeedbackSlotKind::kStoreOwnNamed;
}
+inline bool IsStoreDataPropertyInLiteralKind(FeedbackSlotKind kind) {
+ return kind == FeedbackSlotKind::kStoreDataPropertyInLiteral;
+}
+
inline bool IsKeyedStoreICKind(FeedbackSlotKind kind) {
return kind == FeedbackSlotKind::kStoreKeyedSloppy ||
kind == FeedbackSlotKind::kStoreKeyedStrict;
@@ -202,8 +206,8 @@ class FeedbackVector : public HeapObject {
FeedbackSlot GetTypeProfileSlot() const;
- static Handle<FeedbackVector> New(Isolate* isolate,
- Handle<SharedFunctionInfo> shared);
+ V8_EXPORT_PRIVATE static Handle<FeedbackVector> New(
+ Isolate* isolate, Handle<SharedFunctionInfo> shared);
static Handle<FeedbackVector> Copy(Isolate* isolate,
Handle<FeedbackVector> vector);
@@ -293,15 +297,28 @@ class FeedbackVector : public HeapObject {
static void AddToVectorsForProfilingTools(Isolate* isolate,
Handle<FeedbackVector> vector);
- void FeedbackSlotPrint(std::ostream& os, FeedbackSlot slot,
- FeedbackSlotKind kind); // NOLINT
-
DISALLOW_IMPLICIT_CONSTRUCTORS(FeedbackVector);
};
-template <typename Derived>
-class V8_EXPORT_PRIVATE FeedbackVectorSpecBase {
+class V8_EXPORT_PRIVATE FeedbackVectorSpec {
public:
+ explicit FeedbackVectorSpec(Zone* zone) : slot_kinds_(zone) {
+ slot_kinds_.reserve(16);
+ }
+
+ int slots() const { return static_cast<int>(slot_kinds_.size()); }
+
+ FeedbackSlotKind GetKind(FeedbackSlot slot) const {
+ return static_cast<FeedbackSlotKind>(slot_kinds_.at(slot.ToInt()));
+ }
+
+ bool HasTypeProfileSlot() const;
+
+ // If used, the TypeProfileSlot is always added as the first slot and its
+ // index is constant. If other slots are added before the TypeProfileSlot,
+ // this number changes.
+ static const int kTypeProfileSlotIndex = 0;
+
FeedbackSlot AddCallICSlot() { return AddSlot(FeedbackSlotKind::kCall); }
FeedbackSlot AddLoadICSlot() {
@@ -379,58 +396,6 @@ class V8_EXPORT_PRIVATE FeedbackVectorSpecBase {
private:
FeedbackSlot AddSlot(FeedbackSlotKind kind);
- Derived* This() { return static_cast<Derived*>(this); }
-};
-
-class StaticFeedbackVectorSpec
- : public FeedbackVectorSpecBase<StaticFeedbackVectorSpec> {
- public:
- StaticFeedbackVectorSpec() : slot_count_(0) {}
-
- int slots() const { return slot_count_; }
-
- FeedbackSlotKind GetKind(FeedbackSlot slot) const {
- DCHECK(slot.ToInt() >= 0 && slot.ToInt() < slot_count_);
- return kinds_[slot.ToInt()];
- }
-
- private:
- friend class FeedbackVectorSpecBase<StaticFeedbackVectorSpec>;
-
- void append(FeedbackSlotKind kind) {
- DCHECK_LT(slot_count_, kMaxLength);
- kinds_[slot_count_++] = kind;
- }
-
- static const int kMaxLength = 12;
-
- int slot_count_;
- FeedbackSlotKind kinds_[kMaxLength];
-};
-
-class V8_EXPORT_PRIVATE FeedbackVectorSpec
- : public FeedbackVectorSpecBase<FeedbackVectorSpec> {
- public:
- explicit FeedbackVectorSpec(Zone* zone) : slot_kinds_(zone) {
- slot_kinds_.reserve(16);
- }
-
- int slots() const { return static_cast<int>(slot_kinds_.size()); }
-
- FeedbackSlotKind GetKind(FeedbackSlot slot) const {
- return static_cast<FeedbackSlotKind>(slot_kinds_.at(slot.ToInt()));
- }
-
- bool HasTypeProfileSlot() const;
-
- // If used, the TypeProfileSlot is always added as the first slot and its
- // index is constant. If other slots are added before the TypeProfileSlot,
- // this number changes.
- static const int kTypeProfileSlotIndex = 0;
-
- private:
- friend class FeedbackVectorSpecBase<FeedbackVectorSpec>;
-
void append(FeedbackSlotKind kind) {
slot_kinds_.push_back(static_cast<unsigned char>(kind));
}
@@ -465,8 +430,9 @@ class FeedbackMetadata : public FixedArray {
// Returns slot kind for given slot.
FeedbackSlotKind GetKind(FeedbackSlot slot) const;
- template <typename Spec>
- static Handle<FeedbackMetadata> New(Isolate* isolate, const Spec* spec);
+ // If {spec} is null, then it is considered empty.
+ V8_EXPORT_PRIVATE static Handle<FeedbackMetadata> New(
+ Isolate* isolate, const FeedbackVectorSpec* spec = nullptr);
#ifdef OBJECT_PRINT
// For gdb debugging.
@@ -551,14 +517,15 @@ class FeedbackMetadataIterator {
};
// A FeedbackNexus is the combination of a FeedbackVector and a slot.
-// Derived classes customize the update and retrieval of feedback.
-class FeedbackNexus {
+class FeedbackNexus final {
public:
FeedbackNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : vector_handle_(vector), vector_(nullptr), slot_(slot) {}
+ : vector_handle_(vector),
+ vector_(nullptr),
+ slot_(slot),
+ kind_(vector->GetKind(slot)) {}
FeedbackNexus(FeedbackVector* vector, FeedbackSlot slot)
- : vector_(vector), slot_(slot) {}
- virtual ~FeedbackNexus() {}
+ : vector_(vector), slot_(slot), kind_(vector->GetKind(slot)) {}
Handle<FeedbackVector> vector_handle() const {
DCHECK_NULL(vector_);
@@ -568,12 +535,20 @@ class FeedbackNexus {
return vector_handle_.is_null() ? vector_ : *vector_handle_;
}
FeedbackSlot slot() const { return slot_; }
- FeedbackSlotKind kind() const { return vector()->GetKind(slot()); }
+ FeedbackSlotKind kind() const { return kind_; }
+
+ inline LanguageMode GetLanguageMode() const {
+ return vector()->GetLanguageMode(slot());
+ }
InlineCacheState ic_state() const { return StateFromFeedback(); }
bool IsUninitialized() const { return StateFromFeedback() == UNINITIALIZED; }
bool IsMegamorphic() const { return StateFromFeedback() == MEGAMORPHIC; }
bool IsGeneric() const { return StateFromFeedback() == GENERIC; }
+
+ void Print(std::ostream& os); // NOLINT
+
+ // For map-based ICs (load, keyed-load, store, keyed-store).
Map* FindFirstMap() const {
MapHandles maps;
ExtractMaps(&maps);
@@ -581,19 +556,19 @@ class FeedbackNexus {
return nullptr;
}
- virtual InlineCacheState StateFromFeedback() const = 0;
- virtual int ExtractMaps(MapHandles* maps) const;
- virtual MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const;
- virtual bool FindHandlers(ObjectHandles* code_list, int length = -1) const;
- virtual Name* FindFirstName() const { return nullptr; }
+ InlineCacheState StateFromFeedback() const;
+ int ExtractMaps(MapHandles* maps) const;
+ MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const;
+ bool FindHandlers(ObjectHandles* code_list, int length = -1) const;
- bool IsCleared() {
+ bool IsCleared() const {
InlineCacheState state = StateFromFeedback();
return !FLAG_use_ic || state == UNINITIALIZED || state == PREMONOMORPHIC;
}
- virtual void Clear() { ConfigureUninitialized(); }
- virtual void ConfigureUninitialized();
+ // Clear() returns true if the state of the underlying vector was changed.
+ bool Clear();
+ void ConfigureUninitialized();
void ConfigurePremonomorphic();
bool ConfigureMegamorphic(IcCheckType property_type);
@@ -608,51 +583,21 @@ class FeedbackNexus {
void ConfigurePolymorphic(Handle<Name> name, MapHandles const& maps,
ObjectHandles* handlers);
- protected:
- inline void SetFeedback(Object* feedback,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline void SetFeedbackExtra(Object* feedback_extra,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-
- Handle<FixedArray> EnsureArrayOfSize(int length);
- Handle<FixedArray> EnsureExtraArrayOfSize(int length);
-
- private:
- // The reason for having a vector handle and a raw pointer is that we can and
- // should use handles during IC miss, but not during GC when we clear ICs. If
- // you have a handle to the vector that is better because more operations can
- // be done, like allocation.
- Handle<FeedbackVector> vector_handle_;
- FeedbackVector* vector_;
- FeedbackSlot slot_;
-};
-
-class CallICNexus final : public FeedbackNexus {
- public:
- CallICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK(vector->IsCallIC(slot));
- }
- CallICNexus(FeedbackVector* vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK(vector->IsCallIC(slot));
- }
+ BinaryOperationHint GetBinaryOperationFeedback() const;
+ CompareOperationHint GetCompareOperationFeedback() const;
+ ForInHint GetForInFeedback() const;
- void ConfigureUninitialized() final;
+ // For KeyedLoad ICs.
+ KeyedAccessLoadMode GetKeyedAccessLoadMode() const;
- InlineCacheState StateFromFeedback() const final;
+ // For KeyedStore ICs.
+ KeyedAccessStoreMode GetKeyedAccessStoreMode() const;
- int ExtractMaps(MapHandles* maps) const final {
- // CallICs don't record map feedback.
- return 0;
- }
- MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
- return MaybeHandle<Code>();
- }
- bool FindHandlers(ObjectHandles* code_list, int length = -1) const final {
- return length == 0;
- }
+ // For KeyedLoad and KeyedStore ICs.
+ IcCheckType GetKeyType() const;
+ Name* FindFirstName() const;
+ // For Call ICs.
int GetCallCount();
void SetSpeculationMode(SpeculationMode mode);
SpeculationMode GetSpeculationMode();
@@ -663,91 +608,20 @@ class CallICNexus final : public FeedbackNexus {
typedef BitField<SpeculationMode, 0, 1> SpeculationModeField;
typedef BitField<uint32_t, 1, 31> CallCountField;
-};
-
-class LoadICNexus : public FeedbackNexus {
- public:
- LoadICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK(vector->IsLoadIC(slot));
- }
- LoadICNexus(FeedbackVector* vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK(vector->IsLoadIC(slot));
- }
-
- void Clear() override { ConfigurePremonomorphic(); }
-
- InlineCacheState StateFromFeedback() const override;
-};
-
-class KeyedLoadICNexus : public FeedbackNexus {
- public:
- KeyedLoadICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK(vector->IsKeyedLoadIC(slot));
- }
- KeyedLoadICNexus(FeedbackVector* vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK(vector->IsKeyedLoadIC(slot));
- }
-
- void Clear() override { ConfigurePremonomorphic(); }
-
- KeyedAccessLoadMode GetKeyedAccessLoadMode() const;
- IcCheckType GetKeyType() const;
- InlineCacheState StateFromFeedback() const override;
- Name* FindFirstName() const override;
-};
-
-class StoreICNexus : public FeedbackNexus {
- public:
- StoreICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK(vector->IsStoreIC(slot) || vector->IsStoreOwnIC(slot));
- }
- StoreICNexus(FeedbackVector* vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK(vector->IsStoreIC(slot) || vector->IsStoreOwnIC(slot));
- }
-
- void Clear() override { ConfigurePremonomorphic(); }
-
- InlineCacheState StateFromFeedback() const override;
-};
-// Base class for LoadGlobalICNexus and StoreGlobalICNexus.
-class GlobalICNexus : public FeedbackNexus {
- public:
- GlobalICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK(vector->IsGlobalIC(slot));
- }
- GlobalICNexus(FeedbackVector* vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK(vector->IsGlobalIC(slot));
- }
+ // For CreateClosure ICs.
+ Handle<FeedbackCell> GetFeedbackCell() const;
- int ExtractMaps(MapHandles* maps) const final {
- // Load/StoreGlobalICs don't record map feedback.
- return 0;
- }
- MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
- return MaybeHandle<Code>();
- }
- bool FindHandlers(ObjectHandles* code_list, int length = -1) const final {
- return length == 0;
- }
+ // For InstanceOf ICs.
+ MaybeHandle<JSObject> GetConstructorFeedback() const;
- void ConfigureUninitialized() override;
+ // For Global Load and Store ICs.
void ConfigurePropertyCellMode(Handle<PropertyCell> cell);
// Returns false if given combination of indices is not allowed.
bool ConfigureLexicalVarMode(int script_context_index,
int context_slot_index);
void ConfigureHandlerMode(Handle<Object> handler);
- InlineCacheState StateFromFeedback() const override;
-
// Bit positions in a smi that encodes lexical environment variable access.
#define LEXICAL_MODE_BIT_FIELDS(V, _) \
V(ContextIndexBits, unsigned, 12, _) \
@@ -758,187 +632,10 @@ class GlobalICNexus : public FeedbackNexus {
// Make sure we don't overflow the smi.
STATIC_ASSERT(LEXICAL_MODE_BIT_FIELDS_Ranges::kBitsCount <= kSmiValueSize);
-};
-
-class LoadGlobalICNexus : public GlobalICNexus {
- public:
- LoadGlobalICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : GlobalICNexus(vector, slot) {
- DCHECK(vector->IsLoadGlobalIC(slot));
- }
- LoadGlobalICNexus(FeedbackVector* vector, FeedbackSlot slot)
- : GlobalICNexus(vector, slot) {
- DCHECK(vector->IsLoadGlobalIC(slot));
- }
-};
-
-class StoreGlobalICNexus : public GlobalICNexus {
- public:
- StoreGlobalICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : GlobalICNexus(vector, slot) {
- DCHECK(vector->IsStoreGlobalIC(slot));
- }
- StoreGlobalICNexus(FeedbackVector* vector, FeedbackSlot slot)
- : GlobalICNexus(vector, slot) {
- DCHECK(vector->IsStoreGlobalIC(slot));
- }
-};
-// TODO(ishell): Currently we use StoreOwnIC only for storing properties that
-// already exist in the boilerplate therefore we can use StoreIC.
-typedef StoreICNexus StoreOwnICNexus;
-
-class KeyedStoreICNexus : public FeedbackNexus {
- public:
- KeyedStoreICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK(vector->IsKeyedStoreIC(slot));
- }
- KeyedStoreICNexus(FeedbackVector* vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK(vector->IsKeyedStoreIC(slot));
- }
-
- void Clear() override { ConfigurePremonomorphic(); }
-
- KeyedAccessStoreMode GetKeyedAccessStoreMode() const;
- IcCheckType GetKeyType() const;
-
- InlineCacheState StateFromFeedback() const override;
- Name* FindFirstName() const override;
-};
-
-class BinaryOpICNexus final : public FeedbackNexus {
- public:
- BinaryOpICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackSlotKind::kBinaryOp, vector->GetKind(slot));
- }
- BinaryOpICNexus(FeedbackVector* vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackSlotKind::kBinaryOp, vector->GetKind(slot));
- }
-
- InlineCacheState StateFromFeedback() const final;
- BinaryOperationHint GetBinaryOperationFeedback() const;
-
- int ExtractMaps(MapHandles* maps) const final {
- // BinaryOpICs don't record map feedback.
- return 0;
- }
- MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
- return MaybeHandle<Code>();
- }
- bool FindHandlers(ObjectHandles* code_list, int length = -1) const final {
- return length == 0;
- }
-};
-
-class CompareICNexus final : public FeedbackNexus {
- public:
- CompareICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackSlotKind::kCompareOp, vector->GetKind(slot));
- }
- CompareICNexus(FeedbackVector* vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackSlotKind::kCompareOp, vector->GetKind(slot));
- }
-
- InlineCacheState StateFromFeedback() const final;
- CompareOperationHint GetCompareOperationFeedback() const;
-
- int ExtractMaps(MapHandles* maps) const final {
- // CompareICs don't record map feedback.
- return 0;
- }
- MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
- return MaybeHandle<Code>();
- }
- bool FindHandlers(ObjectHandles* code_list, int length = -1) const final {
- return length == 0;
- }
-};
-
-class ForInICNexus final : public FeedbackNexus {
- public:
- ForInICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackSlotKind::kForIn, vector->GetKind(slot));
- }
- ForInICNexus(FeedbackVector* vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackSlotKind::kForIn, vector->GetKind(slot));
- }
-
- InlineCacheState StateFromFeedback() const final;
- ForInHint GetForInFeedback() const;
-
- int ExtractMaps(MapHandles* maps) const final { return 0; }
- MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
- return MaybeHandle<Code>();
- }
- bool FindHandlers(ObjectHandles* code_list, int length = -1) const final {
- return length == 0;
- }
-};
-
-class InstanceOfICNexus final : public FeedbackNexus {
- public:
- InstanceOfICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackSlotKind::kInstanceOf, vector->GetKind(slot));
- }
- InstanceOfICNexus(FeedbackVector* vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackSlotKind::kInstanceOf, vector->GetKind(slot));
- }
-
- void ConfigureUninitialized() final;
-
- InlineCacheState StateFromFeedback() const final;
- MaybeHandle<JSObject> GetConstructorFeedback() const;
-
- int ExtractMaps(MapHandles* maps) const final { return 0; }
- MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
- return MaybeHandle<Code>();
- }
- bool FindHandlers(ObjectHandles* code_list, int length = -1) const final {
- return length == 0;
- }
-};
-
-class StoreDataPropertyInLiteralICNexus : public FeedbackNexus {
- public:
- StoreDataPropertyInLiteralICNexus(Handle<FeedbackVector> vector,
- FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackSlotKind::kStoreDataPropertyInLiteral,
- vector->GetKind(slot));
- }
- StoreDataPropertyInLiteralICNexus(FeedbackVector* vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackSlotKind::kStoreDataPropertyInLiteral,
- vector->GetKind(slot));
- }
-
- void ConfigureMonomorphic(Handle<Name> name, Handle<Map> receiver_map);
-
- InlineCacheState StateFromFeedback() const override;
-};
-
-// For each assignment, store the type of the value in the collection of types
-// in the feedback vector.
-class CollectTypeProfileNexus : public FeedbackNexus {
- public:
- CollectTypeProfileNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackSlotKind::kTypeProfile, vector->GetKind(slot));
- }
- CollectTypeProfileNexus(FeedbackVector* vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackSlotKind::kTypeProfile, vector->GetKind(slot));
- }
+ // For TypeProfile feedback vector slots.
+ // ResetTypeProfile will always reset type profile information.
+ void ResetTypeProfile();
// Add a type to the list of types for source position <position>.
void Collect(Handle<String> type, int position);
@@ -947,9 +644,24 @@ class CollectTypeProfileNexus : public FeedbackNexus {
std::vector<int> GetSourcePositions() const;
std::vector<Handle<String>> GetTypesForSourcePositions(uint32_t pos) const;
- void Clear() override;
+ protected:
+ inline void SetFeedback(Object* feedback,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void SetFeedbackExtra(Object* feedback_extra,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
+ Handle<FixedArray> EnsureArrayOfSize(int length);
+ Handle<FixedArray> EnsureExtraArrayOfSize(int length);
- InlineCacheState StateFromFeedback() const override;
+ private:
+ // The reason for having a vector handle and a raw pointer is that we can and
+ // should use handles during IC miss, but not during GC when we clear ICs. If
+ // you have a handle to the vector that is better because more operations can
+ // be done, like allocation.
+ Handle<FeedbackVector> vector_handle_;
+ FeedbackVector* vector_;
+ FeedbackSlot slot_;
+ FeedbackSlotKind kind_;
};
inline BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback);
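For the call-IC bookkeeping above, SpeculationModeField and CallCountField pack two values into the slot's extra Smi: the mode sits in bit 0 and the count in the 31 bits above it. A standalone restatement of that layout with plain shifts (the numeric meaning of SpeculationMode's enumerators is not shown in this section, so a bool stands in for it here):

#include <cstdint>

// Bit 0: speculation mode, bits 1..31: call count, matching the BitField
// typedefs above. Plain integer version, for illustration only.
uint32_t EncodeCallFeedback(uint32_t call_count, bool speculation_bit) {
  return (call_count << 1) | (speculation_bit ? 1u : 0u);
}
uint32_t DecodeCallCount(uint32_t value) { return value >> 1; }
bool DecodeSpeculationBit(uint32_t value) { return (value & 1u) != 0; }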
diff --git a/deps/v8/src/field-index-inl.h b/deps/v8/src/field-index-inl.h
index 61540773db..41fddb6e0b 100644
--- a/deps/v8/src/field-index-inl.h
+++ b/deps/v8/src/field-index-inl.h
@@ -99,4 +99,4 @@ inline FieldIndex FieldIndex::ForDescriptor(const Map* map,
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_FIELD_INDEX_INL_H_
diff --git a/deps/v8/src/field-index.h b/deps/v8/src/field-index.h
index 9e390e3d46..a1552f050e 100644
--- a/deps/v8/src/field-index.h
+++ b/deps/v8/src/field-index.h
@@ -135,4 +135,4 @@ class FieldIndex final {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_FIELD_INDEX_H_
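The two field-index hunks above only annotate the closing #endif of each include guard with the guard's name, which is the convention these headers follow. For context, the full pattern the added comment refers to:

#ifndef V8_FIELD_INDEX_H_
#define V8_FIELD_INDEX_H_

// ... declarations ...

#endif  // V8_FIELD_INDEX_H_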
diff --git a/deps/v8/src/field-type.h b/deps/v8/src/field-type.h
index 40114f76d3..8eec7a5b58 100644
--- a/deps/v8/src/field-type.h
+++ b/deps/v8/src/field-type.h
@@ -7,7 +7,6 @@
#include "src/objects.h"
#include "src/objects/map.h"
-#include "src/ostreams.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 8c3a08f81a..8fceed0783 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -9,6 +9,8 @@
// This include does not have a guard, because it is a template-style include,
// which can be included multiple times in different modes. It expects to have
// a mode defined before it's included. The modes are FLAG_MODE_... below:
+//
+// PRESUBMIT_INTENTIONALLY_MISSING_INCLUDE_GUARD
#define DEFINE_IMPLICATION(whenflag, thenflag) \
DEFINE_VALUE_IMPLICATION(whenflag, thenflag, true)
@@ -203,33 +205,36 @@ DEFINE_IMPLICATION(harmony_class_fields, harmony_public_fields)
DEFINE_IMPLICATION(harmony_class_fields, harmony_static_fields)
DEFINE_IMPLICATION(harmony_class_fields, harmony_private_fields)
+// Update bootstrapper.cc whenever adding a new feature flag.
+
// Features that are still work in progress (behind individual flags).
#define HARMONY_INPROGRESS(V) \
- V(harmony_import_meta, "harmony import.meta property") \
V(harmony_array_prototype_values, "harmony Array.prototype.values") \
- V(harmony_function_sent, "harmony function.sent") \
V(harmony_do_expressions, "harmony do-expressions") \
V(harmony_class_fields, "harmony fields in class literals") \
V(harmony_static_fields, "harmony static fields in class literals") \
- V(harmony_bigint, "harmony arbitrary precision integers") \
- V(harmony_private_fields, "harmony private fields in class literals")
+ V(harmony_bigint, "harmony arbitrary precision integers")
// Features that are complete (but still behind --harmony/es-staging flag).
#define HARMONY_STAGED(V) \
- V(harmony_function_tostring, "harmony Function.prototype.toString") \
V(harmony_restrict_constructor_return, \
"harmony disallow non undefined primitive return value from class " \
"constructor") \
- V(harmony_dynamic_import, "harmony dynamic import") \
V(harmony_public_fields, "harmony public fields in class literals") \
- V(harmony_optional_catch_binding, "allow omitting binding in catch blocks")
+ V(harmony_private_fields, "harmony private fields in class literals")
// Features that are shipping (turned on by default, but internal flag remains).
-#define HARMONY_SHIPPING_BASE(V) \
- V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
- V(harmony_regexp_named_captures, "harmony regexp named captures") \
- V(harmony_regexp_property, "harmony Unicode regexp property classes") \
- V(harmony_promise_finally, "harmony Promise.prototype.finally")
+#define HARMONY_SHIPPING_BASE(V) \
+ V(harmony_subsume_json, "harmony subsume JSON") \
+ V(harmony_string_trimming, "harmony String.prototype.trim{Start,End}") \
+ V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
+ V(harmony_regexp_named_captures, "harmony regexp named captures") \
+ V(harmony_regexp_property, "harmony Unicode regexp property classes") \
+ V(harmony_function_tostring, "harmony Function.prototype.toString") \
+ V(harmony_promise_finally, "harmony Promise.prototype.finally") \
+ V(harmony_optional_catch_binding, "allow omitting binding in catch blocks") \
+ V(harmony_import_meta, "harmony import.meta property") \
+ V(harmony_dynamic_import, "harmony dynamic import")
#ifdef V8_INTL_SUPPORT
#define HARMONY_SHIPPING(V) \
@@ -278,7 +283,6 @@ DEFINE_BOOL(future, FUTURE_BOOL,
"Implies all staged features that we want to ship in the "
"not-too-far future")
-DEFINE_IMPLICATION(future, background_compile)
DEFINE_IMPLICATION(future, write_protect_code_memory)
// Flags for experimental implementation features.
@@ -320,6 +324,18 @@ DEFINE_VALUE_IMPLICATION(optimize_for_size, max_semi_space_size, 1)
DEFINE_BOOL(unbox_double_arrays, true, "automatically unbox arrays of doubles")
DEFINE_BOOL_READONLY(string_slices, true, "use string slices")
+// Flags for Ignition for no-snapshot builds.
+#undef FLAG
+#ifndef V8_USE_SNAPSHOT
+#define FLAG FLAG_FULL
+#else
+#define FLAG FLAG_READONLY
+#endif
+DEFINE_INT(interrupt_budget, 144 * KB,
+ "interrupt budget which should be used for the profiler counter")
+#undef FLAG
+#define FLAG FLAG_FULL
+
// Flags for Ignition.
DEFINE_BOOL(ignition_elide_noneffectful_bytecodes, true,
"elide bytecodes which won't have any external effect")
@@ -470,6 +486,8 @@ DEFINE_BOOL(turbo_store_elimination, true,
DEFINE_BOOL(trace_store_elimination, false, "trace store elimination")
DEFINE_BOOL(turbo_rewrite_far_jumps, true,
"rewrite far to near jumps (ia32,x64)")
+DEFINE_BOOL(experimental_inline_promise_constructor, false,
+ "inline the Promise constructor in TurboFan")
#ifdef DISABLE_UNTRUSTED_CODE_MITIGATIONS
#define V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS false
@@ -480,6 +498,11 @@ DEFINE_BOOL(untrusted_code_mitigations, V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS,
"Enable mitigations for executing untrusted code")
#undef V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS
+DEFINE_BOOL(turbo_disable_switch_jump_table, false,
+ "do not emit jump-tables in Turbofan")
+DEFINE_IMPLICATION(untrusted_code_mitigations, turbo_disable_switch_jump_table)
+DEFINE_BOOL(branch_load_poisoning, false, "Mask loads with branch conditions.")
+
// Flags to help platform porters
DEFINE_BOOL(minimal, false,
"simplifies execution model to make porting "
@@ -562,6 +585,8 @@ DEFINE_BOOL(experimental_wasm_threads, false,
"enable prototype threads for wasm")
DEFINE_BOOL(experimental_wasm_sat_f2i_conversions, false,
"enable non-trapping float-to-int conversions for wasm")
+DEFINE_BOOL(experimental_wasm_se, false,
+ "enable prototype sign extension opcodes for wasm")
DEFINE_BOOL(wasm_opt, false, "enable wasm optimization")
DEFINE_BOOL(wasm_no_bounds_checks, false,
@@ -632,6 +657,9 @@ DEFINE_BOOL(trace_gc_verbose, false,
"print more details following each garbage collection")
DEFINE_INT(trace_allocation_stack_interval, -1,
"print stack trace after <n> free-list allocations")
+DEFINE_INT(trace_duplicate_threshold_kb, 0,
+ "print duplicate objects in the heap if their size is more than "
+ "given threshold")
DEFINE_BOOL(trace_fragmentation, false, "report fragmentation for old space")
DEFINE_BOOL(trace_fragmentation_verbose, false,
"report fragmentation for old space (detailed)")
@@ -641,6 +669,7 @@ DEFINE_BOOL(trace_mutator_utilization, false,
DEFINE_BOOL(incremental_marking, true, "use incremental marking")
DEFINE_BOOL(incremental_marking_wrappers, true,
"use incremental marking for marking wrappers")
+DEFINE_BOOL(trace_unmapper, false, "Trace the unmapping")
DEFINE_BOOL(parallel_scavenge, true, "parallel scavenge")
DEFINE_BOOL(trace_parallel_scavenge, false, "trace parallel scavenge")
DEFINE_BOOL(write_protect_code_memory, false, "write protect code memory")
@@ -733,6 +762,8 @@ DEFINE_INT(stress_scavenge, 0,
DEFINE_IMPLICATION(fuzzer_gc_analysis, stress_marking)
DEFINE_IMPLICATION(fuzzer_gc_analysis, stress_scavenge)
+DEFINE_BOOL(disable_abortjs, false, "disables AbortJS runtime function")
+
DEFINE_BOOL(manual_evacuation_candidates_selection, false,
"Test mode only flag. It allows an unit test to select evacuation "
"candidates pages (requires --stress_compaction).")
@@ -894,7 +925,10 @@ DEFINE_INT(histogram_interval, 600000,
// heap-snapshot-generator.cc
DEFINE_BOOL(heap_profiler_trace_objects, false,
"Dump heap object allocations/movements/size_updates")
-
+DEFINE_BOOL(heap_profiler_use_embedder_graph, true,
+ "Use the new EmbedderGraph API to get embedder nodes")
+DEFINE_INT(heap_snapshot_string_limit, 1024,
+ "truncate strings to this length in the heap snapshot")
// sampling-heap-profiler.cc
DEFINE_BOOL(sampling_heap_profiler_suppress_randomness, false,
@@ -941,7 +975,7 @@ DEFINE_BOOL(preparser_scope_analysis, true,
DEFINE_IMPLICATION(preparser_scope_analysis, aggressive_lazy_inner_functions)
// compiler.cc
-DEFINE_BOOL(background_compile, false, "enable background compilation")
+DEFINE_BOOL(background_compile, true, "enable background compilation")
// simulator-arm.cc, simulator-arm64.cc and simulator-mips.cc
DEFINE_BOOL(trace_sim, false, "Trace simulator execution")
@@ -995,6 +1029,13 @@ DEFINE_INT(fuzzer_random_seed, 0,
DEFINE_BOOL(trace_rail, false, "trace RAIL mode")
DEFINE_BOOL(print_all_exceptions, false,
"print exception object and stack trace on each thrown exception")
+#ifdef V8_EMBEDDED_BUILTINS
+DEFINE_BOOL(stress_off_heap_code, false,
+ "Move code objects off-heap for testing.")
+#else
+FLAG_READONLY(BOOL, bool, stress_off_heap_code, false,
+ "Move code objects off-heap for testing.")
+#endif
// runtime.cc
DEFINE_BOOL(runtime_call_stats, false, "report runtime call counts and times")
@@ -1277,6 +1318,7 @@ DEFINE_BOOL(predictable, false, "enable predictable mode")
DEFINE_IMPLICATION(predictable, single_threaded)
DEFINE_NEG_IMPLICATION(predictable, memory_reducer)
DEFINE_VALUE_IMPLICATION(single_threaded, wasm_num_compilation_tasks, 0)
+DEFINE_NEG_IMPLICATION(single_threaded, wasm_async_compilation)
//
// Threading related flags.
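The interrupt_budget hunk above leans on flag-definitions.h being included in different modes: FLAG is redefined to FLAG_FULL or FLAG_READONLY around a group of DEFINE_* lines, so the same definition yields a settable flag in no-snapshot builds and a read-only constant otherwise. A much-reduced sketch of that toggle (the macro bodies and signatures here are illustrative, not V8's actual expansion):

#include <cstdio>

// Reduced illustration of the FLAG_FULL / FLAG_READONLY switch used around
// DEFINE_INT(interrupt_budget, ...). Macro bodies are made up for the sketch.
#define FLAG_FULL(type, name, def, cmt) type FLAG_##name = def;
#define FLAG_READONLY(type, name, def, cmt) static const type FLAG_##name = def;

#ifndef DEMO_USE_SNAPSHOT
#define FLAG FLAG_FULL  // no-snapshot build: the flag can be changed
#else
#define FLAG FLAG_READONLY  // snapshot build: the flag is a constant
#endif

FLAG(int, interrupt_budget, 144 * 1024, "profiler counter budget")
#undef FLAG

int main() { std::printf("interrupt_budget = %d\n", FLAG_interrupt_budget); }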
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index d5a04ad933..a63a85e7fc 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -177,18 +177,32 @@ bool IsInterpreterFramePc(Isolate* isolate, Address pc) {
Code* interpreter_bytecode_dispatch =
isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
- return (pc >= interpreter_entry_trampoline->instruction_start() &&
- pc < interpreter_entry_trampoline->instruction_end()) ||
- (pc >= interpreter_bytecode_advance->instruction_start() &&
- pc < interpreter_bytecode_advance->instruction_end()) ||
- (pc >= interpreter_bytecode_dispatch->instruction_start() &&
- pc < interpreter_bytecode_dispatch->instruction_end());
+ return (pc >= interpreter_entry_trampoline->InstructionStart() &&
+ pc < interpreter_entry_trampoline->InstructionEnd()) ||
+ (pc >= interpreter_bytecode_advance->InstructionStart() &&
+ pc < interpreter_bytecode_advance->InstructionEnd()) ||
+ (pc >= interpreter_bytecode_dispatch->InstructionStart() &&
+ pc < interpreter_bytecode_dispatch->InstructionEnd());
}
DISABLE_ASAN Address ReadMemoryAt(Address address) {
return Memory::Address_at(address);
}
+WasmInstanceObject* LookupWasmInstanceObjectFromStandardFrame(
+ const StandardFrame* frame) {
+ // TODO(titzer): WASM instances cannot be found from the code in the future.
+ WasmInstanceObject* ret =
+ FLAG_wasm_jit_to_native
+ ? WasmInstanceObject::GetOwningInstance(
+ frame->isolate()->wasm_engine()->code_manager()->LookupCode(
+ frame->pc()))
+ : WasmInstanceObject::GetOwningInstanceGC(frame->LookupCode());
+ // This is a live stack frame, there must be a live wasm instance available.
+ DCHECK_NOT_NULL(ret);
+ return ret;
+}
+
} // namespace
SafeStackFrameIterator::SafeStackFrameIterator(
@@ -376,8 +390,8 @@ Code* GetContainingCode(Isolate* isolate, Address pc) {
Code* StackFrame::LookupCode() const {
Code* result = GetContainingCode(isolate(), pc());
- DCHECK_GE(pc(), result->instruction_start());
- DCHECK_LT(pc(), result->instruction_end());
+ DCHECK_GE(pc(), result->InstructionStart());
+ DCHECK_LT(pc(), result->InstructionEnd());
return result;
}
@@ -385,12 +399,12 @@ void StackFrame::IteratePc(RootVisitor* v, Address* pc_address,
Address* constant_pool_address, Code* holder) {
Address pc = *pc_address;
DCHECK(holder->GetHeap()->GcSafeCodeContains(holder, pc));
- unsigned pc_offset = static_cast<unsigned>(pc - holder->instruction_start());
+ unsigned pc_offset = static_cast<unsigned>(pc - holder->InstructionStart());
Object* code = holder;
- v->VisitRootPointer(Root::kTop, &code);
+ v->VisitRootPointer(Root::kTop, nullptr, &code);
if (code == holder) return;
holder = reinterpret_cast<Code*>(code);
- pc = holder->instruction_start() + pc_offset;
+ pc = holder->InstructionStart() + pc_offset;
*pc_address = pc;
if (FLAG_enable_embedded_constant_pool && constant_pool_address) {
*constant_pool_address = holder->constant_pool();
@@ -600,7 +614,7 @@ void ExitFrame::Iterate(RootVisitor* v) const {
// The arguments are traversed as part of the expression stack of
// the calling frame.
IteratePc(v, pc_address(), constant_pool_address(), LookupCode());
- v->VisitRootPointer(Root::kTop, &code_slot());
+ v->VisitRootPointer(Root::kTop, nullptr, &code_slot());
}
@@ -688,8 +702,28 @@ void PrintIndex(StringStream* accumulator, StackFrame::PrintMode mode,
int index) {
accumulator->Add((mode == StackFrame::OVERVIEW) ? "%5d: " : "[%d]: ", index);
}
+
+const char* StringForStackFrameType(StackFrame::Type type) {
+ switch (type) {
+#define CASE(value, name) \
+ case StackFrame::value: \
+ return #name;
+ STACK_FRAME_TYPE_LIST(CASE)
+#undef CASE
+ default:
+ UNREACHABLE();
+ }
+}
} // namespace
+void StackFrame::Print(StringStream* accumulator, PrintMode mode,
+ int index) const {
+ DisallowHeapAllocation no_gc;
+ PrintIndex(accumulator, mode, index);
+ accumulator->Add(StringForStackFrameType(type()));
+ accumulator->Add(" [pc: %p]\n", pc());
+}
+
void BuiltinExitFrame::Print(StringStream* accumulator, PrintMode mode,
int index) const {
DisallowHeapAllocation no_gc;
@@ -868,7 +902,7 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
// Visit the parameters that may be on top of the saved registers.
if (safepoint_entry.argument_count() > 0) {
- v->VisitRootPointers(Root::kTop, parameters_base,
+ v->VisitRootPointers(Root::kTop, nullptr, parameters_base,
parameters_base + safepoint_entry.argument_count());
parameters_base += safepoint_entry.argument_count();
}
@@ -887,7 +921,8 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
for (int i = kNumSafepointRegisters - 1; i >=0; i--) {
if (safepoint_entry.HasRegisterAt(i)) {
int reg_stack_index = MacroAssembler::SafepointRegisterStackIndex(i);
- v->VisitRootPointer(Root::kTop, parameters_base + reg_stack_index);
+ v->VisitRootPointer(Root::kTop, nullptr,
+ parameters_base + reg_stack_index);
}
}
// Skip the words containing the register values.
@@ -900,7 +935,8 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
// Visit the rest of the parameters if they are tagged.
if (has_tagged_params) {
- v->VisitRootPointers(Root::kTop, parameters_base, parameters_limit);
+ v->VisitRootPointers(Root::kTop, nullptr, parameters_base,
+ parameters_limit);
}
// Visit pointer spill slots and locals.
@@ -908,7 +944,7 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
int byte_index = index >> kBitsPerByteLog2;
int bit_index = index & (kBitsPerByte - 1);
if ((safepoint_bits[byte_index] & (1U << bit_index)) != 0) {
- v->VisitRootPointer(Root::kTop, parameters_limit + index);
+ v->VisitRootPointer(Root::kTop, nullptr, parameters_limit + index);
}
}
@@ -921,7 +957,8 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
if (!is_wasm() && !is_wasm_to_js()) {
// If this frame has JavaScript ABI, visit the context (in stub and JS
// frames) and the function (in JS frames).
- v->VisitRootPointers(Root::kTop, frame_header_base, frame_header_limit);
+ v->VisitRootPointers(Root::kTop, nullptr, frame_header_base,
+ frame_header_limit);
}
}
@@ -945,10 +982,10 @@ int StubFrame::LookupExceptionHandlerInTable(int* stack_slots) {
Code* code = LookupCode();
DCHECK(code->is_turbofanned());
DCHECK_EQ(code->kind(), Code::BUILTIN);
- HandlerTable* table = HandlerTable::cast(code->handler_table());
- int pc_offset = static_cast<int>(pc() - code->entry());
+ HandlerTable table(code);
+ int pc_offset = static_cast<int>(pc() - code->InstructionStart());
*stack_slots = code->stack_slots();
- return table->LookupReturn(pc_offset);
+ return table.LookupReturn(pc_offset);
}
void OptimizedFrame::Iterate(RootVisitor* v) const { IterateCompiledFrame(v); }
@@ -1011,7 +1048,7 @@ void JavaScriptFrame::GetFunctions(
void JavaScriptFrame::Summarize(std::vector<FrameSummary>* functions) const {
DCHECK(functions->empty());
Code* code = LookupCode();
- int offset = static_cast<int>(pc() - code->instruction_start());
+ int offset = static_cast<int>(pc() - code->InstructionStart());
AbstractCode* abstract_code = AbstractCode::cast(code);
FrameSummary::JavaScriptFrameSummary summary(isolate(), receiver(),
function(), abstract_code,
@@ -1047,7 +1084,7 @@ Script* JavaScriptFrame::script() const {
int JavaScriptFrame::LookupExceptionHandlerInTable(
int* stack_depth, HandlerTable::CatchPrediction* prediction) {
- DCHECK_EQ(0, LookupCode()->handler_table()->length());
+ DCHECK_EQ(0, LookupCode()->handler_table_offset());
DCHECK(!LookupCode()->is_optimized_code());
return -1;
}
@@ -1097,7 +1134,7 @@ void JavaScriptFrame::PrintTop(Isolate* isolate, FILE* file, bool print_args,
code_offset = iframe->GetBytecodeOffset();
} else {
Code* code = frame->unchecked_code();
- code_offset = static_cast<int>(frame->pc() - code->instruction_start());
+ code_offset = static_cast<int>(frame->pc() - code->InstructionStart());
}
PrintFunctionAndOffset(function, function->abstract_code(), code_offset,
file, print_line_number);
@@ -1155,7 +1192,7 @@ void JavaScriptFrame::CollectTopFrameForICStats(Isolate* isolate) {
code_offset = iframe->GetBytecodeOffset();
} else {
Code* code = frame->unchecked_code();
- code_offset = static_cast<int>(frame->pc() - code->instruction_start());
+ code_offset = static_cast<int>(frame->pc() - code->InstructionStart());
}
CollectFunctionAndOffsetForICStats(function, function->abstract_code(),
code_offset);
@@ -1467,8 +1504,8 @@ int OptimizedFrame::LookupExceptionHandlerInTable(
// code to perform prediction there.
DCHECK_NULL(prediction);
Code* code = LookupCode();
- HandlerTable* table = HandlerTable::cast(code->handler_table());
- int pc_offset = static_cast<int>(pc() - code->entry());
+ HandlerTable table(code);
+ int pc_offset = static_cast<int>(pc() - code->InstructionStart());
if (stack_slots) *stack_slots = code->stack_slots();
// When the return pc has been replaced by a trampoline there won't be
@@ -1479,7 +1516,7 @@ int OptimizedFrame::LookupExceptionHandlerInTable(
SafepointTable safepoints(code);
pc_offset = safepoints.find_return_pc(pc_offset);
}
- return table->LookupReturn(pc_offset);
+ return table.LookupReturn(pc_offset);
}
DeoptimizationData* OptimizedFrame::GetDeoptimizationData(
@@ -1588,9 +1625,8 @@ int InterpretedFrame::position() const {
int InterpretedFrame::LookupExceptionHandlerInTable(
int* context_register, HandlerTable::CatchPrediction* prediction) {
- BytecodeArray* bytecode = function()->shared()->bytecode_array();
- HandlerTable* table = HandlerTable::cast(bytecode->handler_table());
- return table->LookupRange(GetBytecodeOffset(), context_register, prediction);
+ HandlerTable table(function()->shared()->bytecode_array());
+ return table.LookupRange(GetBytecodeOffset(), context_register, prediction);
}
int InterpretedFrame::GetBytecodeOffset() const {
@@ -1711,9 +1747,8 @@ void WasmCompiledFrame::Print(StringStream* accumulator, PrintMode mode,
.start()
: LookupCode()->instruction_start();
int pc = static_cast<int>(this->pc() - instruction_start);
- WasmSharedModuleData* shared = wasm_instance()->compiled_module()->shared();
Vector<const uint8_t> raw_func_name =
- shared->GetRawFunctionName(this->function_index());
+ shared()->GetRawFunctionName(this->function_index());
const int kMaxPrintedFunctionName = 64;
char func_name[kMaxPrintedFunctionName + 1];
int func_name_len = std::min(kMaxPrintedFunctionName, raw_func_name.length());
@@ -1744,23 +1779,24 @@ WasmCodeWrapper WasmCompiledFrame::wasm_code() const {
}
WasmInstanceObject* WasmCompiledFrame::wasm_instance() const {
- WasmInstanceObject* obj =
- FLAG_wasm_jit_to_native
- ? WasmInstanceObject::GetOwningInstance(
- isolate()->wasm_engine()->code_manager()->LookupCode(pc()))
- : WasmInstanceObject::GetOwningInstanceGC(LookupCode());
- // This is a live stack frame; it must have a live instance.
- DCHECK_NOT_NULL(obj);
- return obj;
+ return LookupWasmInstanceObjectFromStandardFrame(this);
+}
+
+WasmSharedModuleData* WasmCompiledFrame::shared() const {
+ return LookupWasmInstanceObjectFromStandardFrame(this)
+ ->compiled_module()
+ ->shared();
+}
+
+WasmCompiledModule* WasmCompiledFrame::compiled_module() const {
+ return LookupWasmInstanceObjectFromStandardFrame(this)->compiled_module();
}
uint32_t WasmCompiledFrame::function_index() const {
return FrameSummary::GetSingle(this).AsWasmCompiled().function_index();
}
-Script* WasmCompiledFrame::script() const {
- return wasm_instance()->compiled_module()->shared()->script();
-}
+Script* WasmCompiledFrame::script() const { return shared()->script(); }
int WasmCompiledFrame::position() const {
return FrameSummary::GetSingle(this).SourcePosition();
@@ -1770,7 +1806,8 @@ void WasmCompiledFrame::Summarize(std::vector<FrameSummary>* functions) const {
DCHECK(functions->empty());
WasmCodeWrapper code = wasm_code();
int offset = static_cast<int>(pc() - code.instructions().start());
- Handle<WasmInstanceObject> instance = code.wasm_instance();
+ Handle<WasmInstanceObject> instance(
+ LookupWasmInstanceObjectFromStandardFrame(this), isolate());
FrameSummary::WasmCompiledFrameSummary summary(
isolate(), instance, code, offset, at_to_number_conversion());
functions->push_back(summary);
@@ -1805,22 +1842,19 @@ int WasmCompiledFrame::LookupExceptionHandlerInTable(int* stack_slots) {
DCHECK_NOT_NULL(stack_slots);
if (!FLAG_wasm_jit_to_native) {
Code* code = LookupCode();
- HandlerTable* table = HandlerTable::cast(code->handler_table());
+ HandlerTable table(code);
int pc_offset = static_cast<int>(pc() - code->entry());
*stack_slots = code->stack_slots();
- return table->LookupReturn(pc_offset);
+ return table.LookupReturn(pc_offset);
}
wasm::WasmCode* code =
isolate()->wasm_engine()->code_manager()->LookupCode(pc());
- if (!code->IsAnonymous()) {
- Object* table_entry =
- code->owner()->compiled_module()->handler_table()->get(code->index());
- if (table_entry->IsHandlerTable()) {
- HandlerTable* table = HandlerTable::cast(table_entry);
- int pc_offset = static_cast<int>(pc() - code->instructions().start());
- *stack_slots = static_cast<int>(code->stack_slots());
- return table->LookupReturn(pc_offset);
- }
+ if (!code->IsAnonymous() && code->handler_table_offset() > 0) {
+ HandlerTable table(code->instructions().start(),
+ code->handler_table_offset());
+ int pc_offset = static_cast<int>(pc() - code->instructions().start());
+ *stack_slots = static_cast<int>(code->stack_slots());
+ return table.LookupReturn(pc_offset);
}
return -1;
}
@@ -1841,7 +1875,8 @@ void WasmInterpreterEntryFrame::Print(StringStream* accumulator, PrintMode mode,
void WasmInterpreterEntryFrame::Summarize(
std::vector<FrameSummary>* functions) const {
- Handle<WasmInstanceObject> instance(wasm_instance(), isolate());
+ Handle<WasmInstanceObject> instance(
+ LookupWasmInstanceObjectFromStandardFrame(this), isolate());
std::vector<std::pair<uint32_t, int>> interpreted_stack =
instance->debug_info()->GetInterpretedStack(fp());
@@ -1860,27 +1895,33 @@ Code* WasmInterpreterEntryFrame::unchecked_code() const {
}
}
+// TODO(titzer): deprecate this method.
WasmInstanceObject* WasmInterpreterEntryFrame::wasm_instance() const {
- WasmInstanceObject* ret =
- FLAG_wasm_jit_to_native
- ? WasmInstanceObject::GetOwningInstance(
- isolate()->wasm_engine()->code_manager()->LookupCode(pc()))
- : WasmInstanceObject::GetOwningInstanceGC(LookupCode());
- // This is a live stack frame, there must be a live wasm instance available.
- DCHECK_NOT_NULL(ret);
- return ret;
+ return LookupWasmInstanceObjectFromStandardFrame(this);
}
-Script* WasmInterpreterEntryFrame::script() const {
- return wasm_instance()->compiled_module()->shared()->script();
+WasmDebugInfo* WasmInterpreterEntryFrame::debug_info() const {
+ return LookupWasmInstanceObjectFromStandardFrame(this)->debug_info();
}
+WasmSharedModuleData* WasmInterpreterEntryFrame::shared() const {
+ return LookupWasmInstanceObjectFromStandardFrame(this)
+ ->compiled_module()
+ ->shared();
+}
+
+WasmCompiledModule* WasmInterpreterEntryFrame::compiled_module() const {
+ return LookupWasmInstanceObjectFromStandardFrame(this)->compiled_module();
+}
+
+Script* WasmInterpreterEntryFrame::script() const { return shared()->script(); }
+
int WasmInterpreterEntryFrame::position() const {
return FrameSummary::GetBottom(this).AsWasmInterpreted().SourcePosition();
}
Object* WasmInterpreterEntryFrame::context() const {
- return wasm_instance()->compiled_module()->native_context();
+ return compiled_module()->native_context();
}
Address WasmInterpreterEntryFrame::GetCallerStackPointer() const {
@@ -2081,7 +2122,7 @@ void StandardFrame::IterateExpressions(RootVisitor* v) const {
const int offset = StandardFrameConstants::kLastObjectOffset;
Object** base = &Memory::Object_at(sp());
Object** limit = &Memory::Object_at(fp() + offset) + 1;
- v->VisitRootPointers(Root::kTop, base, limit);
+ v->VisitRootPointers(Root::kTop, nullptr, base, limit);
}
void JavaScriptFrame::Iterate(RootVisitor* v) const {
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 0c988770f6..2bea6a3ca3 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -5,8 +5,6 @@
#ifndef V8_FRAMES_H_
#define V8_FRAMES_H_
-#include "src/allocation.h"
-#include "src/flags.h"
#include "src/handles.h"
#include "src/objects.h"
#include "src/objects/code.h"
@@ -18,18 +16,20 @@ namespace wasm {
class WasmCode;
}
+// Forward declarations.
class AbstractCode;
class Debug;
-class ObjectVisitor;
-class StringStream;
-
-// Forward declarations.
class ExternalCallbackScope;
class Isolate;
+class ObjectVisitor;
class RootVisitor;
class StackFrameIteratorBase;
+class StringStream;
class ThreadLocalTop;
+class WasmCompiledModule;
+class WasmDebugInfo;
class WasmInstanceObject;
+class WasmSharedModuleData;
class InnerPointerToCodeCache {
public:
@@ -286,9 +286,8 @@ class StackFrame BASE_EMBEDDED {
// Printing support.
enum PrintMode { OVERVIEW, DETAILS };
- virtual void Print(StringStream* accumulator,
- PrintMode mode,
- int index) const { }
+ virtual void Print(StringStream* accumulator, PrintMode mode,
+ int index) const;
Isolate* isolate() const { return isolate_; }
@@ -890,6 +889,11 @@ class InterpretedFrame : public JavaScriptFrame {
static int GetBytecodeOffset(Address fp);
+ static InterpretedFrame* cast(StackFrame* frame) {
+ DCHECK(frame->is_interpreted());
+ return static_cast<InterpretedFrame*>(frame);
+ }
+
protected:
inline explicit InterpretedFrame(StackFrameIteratorBase* iterator);
@@ -968,7 +972,7 @@ class WasmCompiledFrame final : public StandardFrame {
Code* unchecked_code() const override;
// Accessors.
- WasmInstanceObject* wasm_instance() const;
+ WasmInstanceObject* wasm_instance() const; // TODO(titzer): deprecate.
WasmCodeWrapper wasm_code() const;
uint32_t function_index() const;
Script* script() const override;
@@ -989,6 +993,8 @@ class WasmCompiledFrame final : public StandardFrame {
private:
friend class StackFrameIteratorBase;
+ WasmCompiledModule* compiled_module() const;
+ WasmSharedModuleData* shared() const;
};
class WasmInterpreterEntryFrame final : public StandardFrame {
@@ -1008,7 +1014,9 @@ class WasmInterpreterEntryFrame final : public StandardFrame {
Code* unchecked_code() const override;
// Accessors.
- WasmInstanceObject* wasm_instance() const;
+ WasmDebugInfo* debug_info() const;
+ WasmInstanceObject* wasm_instance() const; // TODO(titzer): deprecate.
+
Script* script() const override;
int position() const override;
Object* context() const override;
@@ -1025,6 +1033,8 @@ class WasmInterpreterEntryFrame final : public StandardFrame {
private:
friend class StackFrameIteratorBase;
+ WasmCompiledModule* compiled_module() const;
+ WasmSharedModuleData* shared() const;
};
class WasmToJsFrame : public StubFrame {
diff --git a/deps/v8/src/gdb-jit.h b/deps/v8/src/gdb-jit.h
index b915e71ebe..7ffc6459fb 100644
--- a/deps/v8/src/gdb-jit.h
+++ b/deps/v8/src/gdb-jit.h
@@ -36,4 +36,4 @@ void EventHandler(const v8::JitCodeEvent* event);
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_GDB_JIT_H_
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index 7845d71fb1..fe87060fb0 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -54,7 +54,7 @@ class GlobalHandles::Node {
index_ = 0;
set_active(false);
set_in_new_space_list(false);
- parameter_or_next_free_.next_free = nullptr;
+ data_.next_free = nullptr;
weak_callback_ = nullptr;
}
#endif
@@ -65,7 +65,7 @@ class GlobalHandles::Node {
DCHECK(static_cast<int>(index_) == index);
set_state(FREE);
set_in_new_space_list(false);
- parameter_or_next_free_.next_free = *first_free;
+ data_.next_free = *first_free;
*first_free = this;
}
@@ -75,7 +75,7 @@ class GlobalHandles::Node {
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
set_active(false);
set_state(NORMAL);
- parameter_or_next_free_.parameter = nullptr;
+ data_.parameter = nullptr;
weak_callback_ = nullptr;
IncreaseBlockUses();
}
@@ -100,6 +100,7 @@ class GlobalHandles::Node {
// Object slot accessors.
Object* object() const { return object_; }
Object** location() { return &object_; }
+ const char* label() { return state() == NORMAL ? data_.label : nullptr; }
Handle<Object> handle() { return Handle<Object>(location()); }
// Wrapper class ID accessors.
@@ -185,21 +186,21 @@ class GlobalHandles::Node {
// Callback parameter accessors.
void set_parameter(void* parameter) {
DCHECK(IsInUse());
- parameter_or_next_free_.parameter = parameter;
+ data_.parameter = parameter;
}
void* parameter() const {
DCHECK(IsInUse());
- return parameter_or_next_free_.parameter;
+ return data_.parameter;
}
// Accessors for next free node in the free list.
Node* next_free() {
DCHECK(state() == FREE);
- return parameter_or_next_free_.next_free;
+ return data_.next_free;
}
void set_next_free(Node* value) {
DCHECK(state() == FREE);
- parameter_or_next_free_.next_free = value;
+ data_.next_free = value;
}
void MakeWeak(void* parameter,
@@ -241,6 +242,11 @@ class GlobalHandles::Node {
return p;
}
+ void AnnotateStrongRetainer(const char* label) {
+ DCHECK_EQ(state(), NORMAL);
+ data_.label = label;
+ }
+
void CollectPhantomCallbackData(
Isolate* isolate,
std::vector<PendingPhantomCallback>* pending_phantom_callbacks) {
@@ -346,12 +352,15 @@ class GlobalHandles::Node {
// Handle specific callback - might be a weak reference in disguise.
WeakCallbackInfo<void>::Callback weak_callback_;
- // Provided data for callback. In FREE state, this is used for
- // the free list link.
+ // The meaning of this field depends on node state:
+ // state == FREE: it stores the next free node pointer.
+ // state == NORMAL: it stores the strong retainer label.
+ // otherwise: it stores the parameter for the weak callback.
union {
- void* parameter;
Node* next_free;
- } parameter_or_next_free_;
+ const char* label;
+ void* parameter;
+ } data_;
DISALLOW_COPY_AND_ASSIGN(Node);
};
@@ -447,7 +456,7 @@ void GlobalHandles::Node::IncreaseBlockUses() {
void GlobalHandles::Node::DecreaseBlockUses() {
NodeBlock* node_block = FindBlock();
GlobalHandles* global_handles = node_block->global_handles();
- parameter_or_next_free_.next_free = global_handles->first_free_;
+ data_.next_free = global_handles->first_free_;
global_handles->first_free_ = this;
node_block->DecreaseUses();
global_handles->isolate()->counters()->global_handles()->Decrement();
@@ -579,6 +588,11 @@ void* GlobalHandles::ClearWeakness(Object** location) {
return Node::FromLocation(location)->ClearWeakness();
}
+void GlobalHandles::AnnotateStrongRetainer(Object** location,
+ const char* label) {
+ Node::FromLocation(location)->AnnotateStrongRetainer(label);
+}
+
bool GlobalHandles::IsNearDeath(Object** location) {
return Node::FromLocation(location)->IsNearDeath();
}
@@ -596,7 +610,8 @@ void GlobalHandles::IterateWeakRootsForFinalizers(RootVisitor* v) {
DCHECK(!node->IsPhantomCallback());
DCHECK(!node->IsPhantomResetHandle());
// Finalizers need to survive.
- v->VisitRootPointer(Root::kGlobalHandles, node->location());
+ v->VisitRootPointer(Root::kGlobalHandles, node->label(),
+ node->location());
}
}
}
@@ -635,7 +650,8 @@ void GlobalHandles::IterateNewSpaceStrongAndDependentRoots(RootVisitor* v) {
for (Node* node : new_space_nodes_) {
if (node->IsStrongRetainer() ||
(node->IsWeakRetainer() && node->is_active())) {
- v->VisitRootPointer(Root::kGlobalHandles, node->location());
+ v->VisitRootPointer(Root::kGlobalHandles, node->label(),
+ node->location());
}
}
}
@@ -649,7 +665,8 @@ void GlobalHandles::IterateNewSpaceStrongAndDependentRootsAndIdentifyUnmodified(
}
if (node->IsStrongRetainer() ||
(node->IsWeakRetainer() && node->is_active())) {
- v->VisitRootPointer(Root::kGlobalHandles, node->location());
+ v->VisitRootPointer(Root::kGlobalHandles, node->label(),
+ node->location());
}
}
}
@@ -685,7 +702,8 @@ void GlobalHandles::IterateNewSpaceWeakUnmodifiedRootsForFinalizers(
DCHECK(!node->IsPhantomCallback());
DCHECK(!node->IsPhantomResetHandle());
// Finalizers need to survive.
- v->VisitRootPointer(Root::kGlobalHandles, node->location());
+ v->VisitRootPointer(Root::kGlobalHandles, node->label(),
+ node->location());
}
}
}
@@ -712,7 +730,8 @@ void GlobalHandles::IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
}
} else {
// Node survived and needs to be visited.
- v->VisitRootPointer(Root::kGlobalHandles, node->location());
+ v->VisitRootPointer(Root::kGlobalHandles, node->label(),
+ node->location());
}
}
}
@@ -902,17 +921,27 @@ int GlobalHandles::PostGarbageCollectionProcessing(
void GlobalHandles::IterateStrongRoots(RootVisitor* v) {
for (NodeIterator it(this); !it.done(); it.Advance()) {
if (it.node()->IsStrongRetainer()) {
- v->VisitRootPointer(Root::kGlobalHandles, it.node()->location());
+ v->VisitRootPointer(Root::kGlobalHandles, it.node()->label(),
+ it.node()->location());
}
}
}
+void GlobalHandles::IterateWeakRoots(RootVisitor* v) {
+ for (NodeIterator it(this); !it.done(); it.Advance()) {
+ if (it.node()->IsWeak()) {
+ v->VisitRootPointer(Root::kGlobalHandles, it.node()->label(),
+ it.node()->location());
+ }
+ }
+}
DISABLE_CFI_PERF
void GlobalHandles::IterateAllRoots(RootVisitor* v) {
for (NodeIterator it(this); !it.done(); it.Advance()) {
if (it.node()->IsRetainer()) {
- v->VisitRootPointer(Root::kGlobalHandles, it.node()->location());
+ v->VisitRootPointer(Root::kGlobalHandles, it.node()->label(),
+ it.node()->location());
}
}
}
@@ -921,7 +950,8 @@ DISABLE_CFI_PERF
void GlobalHandles::IterateAllNewSpaceRoots(RootVisitor* v) {
for (Node* node : new_space_nodes_) {
if (node->IsRetainer()) {
- v->VisitRootPointer(Root::kGlobalHandles, node->location());
+ v->VisitRootPointer(Root::kGlobalHandles, node->label(),
+ node->location());
}
}
}
@@ -932,7 +962,8 @@ void GlobalHandles::IterateNewSpaceRoots(RootVisitor* v, size_t start,
for (size_t i = start; i < end; ++i) {
Node* node = new_space_nodes_[i];
if (node->IsRetainer()) {
- v->VisitRootPointer(Root::kGlobalHandles, node->location());
+ v->VisitRootPointer(Root::kGlobalHandles, node->label(),
+ node->location());
}
}
}
@@ -1054,7 +1085,7 @@ void EternalHandles::IterateAllRoots(RootVisitor* visitor) {
int limit = size_;
for (Object** block : blocks_) {
DCHECK_GT(limit, 0);
- visitor->VisitRootPointers(Root::kEternalHandles, block,
+ visitor->VisitRootPointers(Root::kEternalHandles, nullptr, block,
block + Min(limit, kSize));
limit -= kSize;
}
@@ -1062,7 +1093,8 @@ void EternalHandles::IterateAllRoots(RootVisitor* visitor) {
void EternalHandles::IterateNewSpaceRoots(RootVisitor* visitor) {
for (int index : new_space_indices_) {
- visitor->VisitRootPointer(Root::kEternalHandles, GetLocation(index));
+ visitor->VisitRootPointer(Root::kEternalHandles, nullptr,
+ GetLocation(index));
}
}
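
For illustration, a sketch of how the new strong-retainer labels flow from AnnotateStrongRetainer into a RootVisitor, matching the node->label() arguments added in the hunks above. This is not code from this commit: the visitor subclass and the description parameter on VisitRootPointer/VisitRootPointers are inferred from those call sites, and the handle and label names are placeholders.

// Label a strong global handle so heap tooling can attribute it.
Handle<Object> handle = isolate->global_handles()->Create(*object);
GlobalHandles::AnnotateStrongRetainer(handle.location(), "embedder-cache");

// During root iteration the label arrives as the description argument;
// per the accessor above it is node->label() for NORMAL nodes, else nullptr.
class LoggingVisitor : public RootVisitor {
 public:
  void VisitRootPointer(Root root, const char* description,
                        Object** p) override {
    PrintF("root %p (%s)\n", static_cast<void*>(p),
           description ? description : "<unlabeled>");
  }
  void VisitRootPointers(Root root, const char* description, Object** start,
                         Object** end) override {
    for (Object** p = start; p < end; ++p) {
      VisitRootPointer(root, description, p);
    }
  }
};
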
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index 59b94e371b..e96b74b883 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -77,6 +77,8 @@ class GlobalHandles {
static void MakeWeak(Object*** location_addr);
+ static void AnnotateStrongRetainer(Object** location, const char* label);
+
void RecordStats(HeapStats* stats);
// Returns the current number of handles to global objects.
@@ -108,10 +110,10 @@ class GlobalHandles {
int PostGarbageCollectionProcessing(
GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags);
- // Iterates over all strong handles.
void IterateStrongRoots(RootVisitor* v);
- // Iterates over all handles.
+ void IterateWeakRoots(RootVisitor* v);
+
void IterateAllRoots(RootVisitor* v);
void IterateAllNewSpaceRoots(RootVisitor* v);
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index bc28181db1..7ffbf99d61 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -470,6 +470,7 @@ class FreeStoreAllocationPolicy;
class FunctionTemplateInfo;
class MemoryChunk;
class NumberDictionary;
+class SimpleNumberDictionary;
class NameDictionary;
class GlobalDictionary;
template <typename T> class MaybeHandle;
@@ -538,25 +539,6 @@ enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };
enum class AccessMode { ATOMIC, NON_ATOMIC };
-// Possible outcomes for decisions.
-enum class Decision : uint8_t { kUnknown, kTrue, kFalse };
-
-inline size_t hash_value(Decision decision) {
- return static_cast<uint8_t>(decision);
-}
-
-inline std::ostream& operator<<(std::ostream& os, Decision decision) {
- switch (decision) {
- case Decision::kUnknown:
- return os << "Unknown";
- case Decision::kTrue:
- return os << "True";
- case Decision::kFalse:
- return os << "False";
- }
- UNREACHABLE();
-}
-
// Supported write barrier modes.
enum WriteBarrierKind : uint8_t {
kNoWriteBarrier,
@@ -628,9 +610,6 @@ enum NativesFlag {
INSPECTOR_CODE
};
-// JavaScript defines two kinds of 'nil'.
-enum NilValue { kNullValue, kUndefinedValue };
-
// ParseRestriction is used to restrict the set of valid statements in a
// unit of compilation. Restriction violations cause a syntax error.
enum ParseRestriction {
@@ -747,15 +726,6 @@ constexpr int kIeeeDoubleMantissaWordOffset = 4;
constexpr int kIeeeDoubleExponentWordOffset = 0;
#endif
-// AccessorCallback
-struct AccessorDescriptor {
- Object* (*getter)(Isolate* isolate, Object* object, void* data);
- Object* (*setter)(
- Isolate* isolate, JSObject* object, Object* value, void* data);
- void* data;
-};
-
-
// -----------------------------------------------------------------------------
// Macros
@@ -811,8 +781,6 @@ enum CpuFeature {
MIPSr2,
MIPSr6,
MIPS_SIMD, // MSA instructions
- // ARM64
- ALWAYS_ALIGN_CSP,
// PPC
FPR_GPR_MOV,
LWSYNC,
@@ -889,13 +857,6 @@ inline std::ostream& operator<<(std::ostream& os, CreateArgumentsType type) {
UNREACHABLE();
}
-// Used to specify if a macro instruction must perform a smi check on tagged
-// values.
-enum SmiCheckType {
- DONT_DO_SMI_CHECK,
- DO_SMI_CHECK
-};
-
enum ScopeType : uint8_t {
EVAL_SCOPE, // The top-level scope for an eval source.
FUNCTION_SCOPE, // The top-level scope for a function.
@@ -1060,99 +1021,60 @@ enum VariableLocation : uint8_t {
// immediately initialized upon creation (kCreatedInitialized).
enum InitializationFlag : uint8_t { kNeedsInitialization, kCreatedInitialized };
-enum class HoleCheckMode { kRequired, kElided };
-
enum MaybeAssignedFlag : uint8_t { kNotAssigned, kMaybeAssigned };
// Serialized in PreparseData, so numeric values should not be changed.
enum ParseErrorType { kSyntaxError = 0, kReferenceError = 1 };
-
-enum MinusZeroMode {
- TREAT_MINUS_ZERO_AS_ZERO,
- FAIL_ON_MINUS_ZERO
-};
-
-
-enum Signedness { kSigned, kUnsigned };
-
-enum FunctionKind : uint16_t {
- kNormalFunction = 0,
- kArrowFunction = 1 << 0,
- kGeneratorFunction = 1 << 1,
- kConciseMethod = 1 << 2,
- kDefaultConstructor = 1 << 3,
- kDerivedConstructor = 1 << 4,
- kBaseConstructor = 1 << 5,
- kGetterFunction = 1 << 6,
- kSetterFunction = 1 << 7,
- kAsyncFunction = 1 << 8,
- kModule = 1 << 9,
- kClassFieldsInitializerFunction = 1 << 10 | kConciseMethod,
- kLastFunctionKind = kClassFieldsInitializerFunction,
-
- kConciseGeneratorMethod = kGeneratorFunction | kConciseMethod,
- kAccessorFunction = kGetterFunction | kSetterFunction,
- kDefaultBaseConstructor = kDefaultConstructor | kBaseConstructor,
- kDefaultDerivedConstructor = kDefaultConstructor | kDerivedConstructor,
- kClassConstructor =
- kBaseConstructor | kDerivedConstructor | kDefaultConstructor,
- kAsyncArrowFunction = kArrowFunction | kAsyncFunction,
- kAsyncConciseMethod = kAsyncFunction | kConciseMethod,
-
- // https://tc39.github.io/proposal-async-iteration/
- kAsyncConciseGeneratorMethod = kAsyncFunction | kConciseGeneratorMethod,
- kAsyncGeneratorFunction = kAsyncFunction | kGeneratorFunction
+enum FunctionKind : uint8_t {
+ kNormalFunction,
+ kArrowFunction,
+ kGeneratorFunction,
+ kConciseMethod,
+ kDerivedConstructor,
+ kBaseConstructor,
+ kGetterFunction,
+ kSetterFunction,
+ kAsyncFunction,
+ kModule,
+ kClassFieldsInitializerFunction,
+
+ kDefaultBaseConstructor,
+ kDefaultDerivedConstructor,
+ kAsyncArrowFunction,
+ kAsyncConciseMethod,
+
+ kConciseGeneratorMethod,
+ kAsyncConciseGeneratorMethod,
+ kAsyncGeneratorFunction,
+ kLastFunctionKind = kAsyncGeneratorFunction,
};
-inline bool IsValidFunctionKind(FunctionKind kind) {
- return kind == FunctionKind::kNormalFunction ||
- kind == FunctionKind::kArrowFunction ||
- kind == FunctionKind::kGeneratorFunction ||
- kind == FunctionKind::kModule ||
- kind == FunctionKind::kConciseMethod ||
- kind == FunctionKind::kConciseGeneratorMethod ||
- kind == FunctionKind::kGetterFunction ||
- kind == FunctionKind::kSetterFunction ||
- kind == FunctionKind::kAccessorFunction ||
- kind == FunctionKind::kDefaultBaseConstructor ||
- kind == FunctionKind::kDefaultDerivedConstructor ||
- kind == FunctionKind::kBaseConstructor ||
- kind == FunctionKind::kDerivedConstructor ||
- kind == FunctionKind::kAsyncFunction ||
- kind == FunctionKind::kAsyncArrowFunction ||
- kind == FunctionKind::kAsyncConciseMethod ||
- kind == FunctionKind::kAsyncConciseGeneratorMethod ||
- kind == FunctionKind::kAsyncGeneratorFunction ||
- kind == FunctionKind::kClassFieldsInitializerFunction;
-}
-
-
inline bool IsArrowFunction(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
- return (kind & FunctionKind::kArrowFunction) != 0;
+ return kind == FunctionKind::kArrowFunction ||
+ kind == FunctionKind::kAsyncArrowFunction;
}
-
-inline bool IsGeneratorFunction(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
- return (kind & FunctionKind::kGeneratorFunction) != 0;
+inline bool IsModule(FunctionKind kind) {
+ return kind == FunctionKind::kModule;
}
-inline bool IsModule(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
- return (kind & FunctionKind::kModule) != 0;
+inline bool IsAsyncGeneratorFunction(FunctionKind kind) {
+ return kind == FunctionKind::kAsyncGeneratorFunction ||
+ kind == FunctionKind::kAsyncConciseGeneratorMethod;
}
-inline bool IsAsyncFunction(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
- return (kind & FunctionKind::kAsyncFunction) != 0;
+inline bool IsGeneratorFunction(FunctionKind kind) {
+ return kind == FunctionKind::kGeneratorFunction ||
+ kind == FunctionKind::kConciseGeneratorMethod ||
+ IsAsyncGeneratorFunction(kind);
}
-inline bool IsAsyncGeneratorFunction(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
- const FunctionKind kMask = FunctionKind::kAsyncGeneratorFunction;
- return (kind & kMask) == kMask;
+inline bool IsAsyncFunction(FunctionKind kind) {
+ return kind == FunctionKind::kAsyncFunction ||
+ kind == FunctionKind::kAsyncArrowFunction ||
+ kind == FunctionKind::kAsyncConciseMethod ||
+ IsAsyncGeneratorFunction(kind);
}
inline bool IsResumableFunction(FunctionKind kind) {
@@ -1160,50 +1082,47 @@ inline bool IsResumableFunction(FunctionKind kind) {
}
inline bool IsConciseMethod(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
- return (kind & FunctionKind::kConciseMethod) != 0;
+ return kind == FunctionKind::kConciseMethod ||
+ kind == FunctionKind::kConciseGeneratorMethod ||
+ kind == FunctionKind::kAsyncConciseMethod ||
+ kind == FunctionKind::kAsyncConciseGeneratorMethod ||
+ kind == FunctionKind::kClassFieldsInitializerFunction;
}
inline bool IsGetterFunction(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
- return (kind & FunctionKind::kGetterFunction) != 0;
+ return kind == FunctionKind::kGetterFunction;
}
inline bool IsSetterFunction(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
- return (kind & FunctionKind::kSetterFunction) != 0;
+ return kind == FunctionKind::kSetterFunction;
}
inline bool IsAccessorFunction(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
- return (kind & FunctionKind::kAccessorFunction) != 0;
+ return kind == FunctionKind::kGetterFunction ||
+ kind == FunctionKind::kSetterFunction;
}
-
inline bool IsDefaultConstructor(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
- return (kind & FunctionKind::kDefaultConstructor) != 0;
+ return kind == FunctionKind::kDefaultBaseConstructor ||
+ kind == FunctionKind::kDefaultDerivedConstructor;
}
-
inline bool IsBaseConstructor(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
- return (kind & FunctionKind::kBaseConstructor) != 0;
+ return kind == FunctionKind::kBaseConstructor ||
+ kind == FunctionKind::kDefaultBaseConstructor;
}
inline bool IsDerivedConstructor(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
- return (kind & FunctionKind::kDerivedConstructor) != 0;
+ return kind == FunctionKind::kDerivedConstructor ||
+ kind == FunctionKind::kDefaultDerivedConstructor;
}
inline bool IsClassConstructor(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
- return (kind & FunctionKind::kClassConstructor) != 0;
+ return IsBaseConstructor(kind) || IsDerivedConstructor(kind);
}
inline bool IsClassFieldsInitializerFunction(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
return kind == FunctionKind::kClassFieldsInitializerFunction;
}
@@ -1216,6 +1135,48 @@ inline bool IsConstructable(FunctionKind kind) {
return true;
}
+inline std::ostream& operator<<(std::ostream& os, FunctionKind kind) {
+ switch (kind) {
+ case FunctionKind::kNormalFunction:
+ return os << "NormalFunction";
+ case FunctionKind::kArrowFunction:
+ return os << "ArrowFunction";
+ case FunctionKind::kGeneratorFunction:
+ return os << "GeneratorFunction";
+ case FunctionKind::kConciseMethod:
+ return os << "ConciseMethod";
+ case FunctionKind::kDerivedConstructor:
+ return os << "DerivedConstructor";
+ case FunctionKind::kBaseConstructor:
+ return os << "BaseConstructor";
+ case FunctionKind::kGetterFunction:
+ return os << "GetterFunction";
+ case FunctionKind::kSetterFunction:
+ return os << "SetterFunction";
+ case FunctionKind::kAsyncFunction:
+ return os << "AsyncFunction";
+ case FunctionKind::kModule:
+ return os << "Module";
+ case FunctionKind::kClassFieldsInitializerFunction:
+ return os << "ClassFieldsInitializerFunction";
+ case FunctionKind::kDefaultBaseConstructor:
+ return os << "DefaultBaseConstructor";
+ case FunctionKind::kDefaultDerivedConstructor:
+ return os << "DefaultDerivedConstructor";
+ case FunctionKind::kAsyncArrowFunction:
+ return os << "AsyncArrowFunction";
+ case FunctionKind::kAsyncConciseMethod:
+ return os << "AsyncConciseMethod";
+ case FunctionKind::kConciseGeneratorMethod:
+ return os << "ConciseGeneratorMethod";
+ case FunctionKind::kAsyncConciseGeneratorMethod:
+ return os << "AsyncConciseGeneratorMethod";
+ case FunctionKind::kAsyncGeneratorFunction:
+ return os << "AsyncGeneratorFunction";
+ }
+ UNREACHABLE();
+}
+
enum class InterpreterPushArgsMode : unsigned {
kJSFunction,
kWithFinalSpread,
@@ -1406,6 +1367,8 @@ enum ExternalArrayType {
kExternalFloat32Array,
kExternalFloat64Array,
kExternalUint8ClampedArray,
+ kExternalBigInt64Array,
+ kExternalBigUint64Array,
};
struct AssemblerDebugInfo {
@@ -1462,6 +1425,8 @@ inline std::ostream& operator<<(std::ostream& os,
return os;
}
+enum class BlockingBehavior { kBlock, kDontBlock };
+
enum class ConcurrencyMode { kNotConcurrent, kConcurrent };
#define FOR_EACH_ISOLATE_ADDRESS_NAME(C) \
@@ -1476,9 +1441,7 @@ enum class ConcurrencyMode { kNotConcurrent, kConcurrent };
C(PendingHandlerFP, pending_handler_fp) \
C(PendingHandlerSP, pending_handler_sp) \
C(ExternalCaughtException, external_caught_exception) \
- C(JSEntrySP, js_entry_sp) \
- C(MicrotaskQueueBailoutIndex, microtask_queue_bailout_index) \
- C(MicrotaskQueueBailoutCount, microtask_queue_bailout_count)
+ C(JSEntrySP, js_entry_sp)
enum IsolateAddressId {
#define DECLARE_ENUM(CamelName, hacker_name) k##CamelName##Address,
diff --git a/deps/v8/src/handler-table.cc b/deps/v8/src/handler-table.cc
new file mode 100644
index 0000000000..72e0e6caf8
--- /dev/null
+++ b/deps/v8/src/handler-table.cc
@@ -0,0 +1,220 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/handler-table.h"
+
+#include <iomanip>
+
+#include "src/assembler-inl.h"
+#include "src/objects-inl.h"
+#include "src/objects/code-inl.h"
+
+namespace v8 {
+namespace internal {
+
+HandlerTable::HandlerTable(Code* code)
+ : HandlerTable(code->InstructionStart(), code->handler_table_offset()) {}
+
+HandlerTable::HandlerTable(BytecodeArray* bytecode_array)
+ : HandlerTable(bytecode_array->handler_table()) {}
+
+HandlerTable::HandlerTable(ByteArray* byte_array)
+ : number_of_entries_(byte_array->length() / kRangeEntrySize /
+ sizeof(int32_t)),
+#ifdef DEBUG
+ mode_(kRangeBasedEncoding),
+#endif
+ raw_encoded_data_(byte_array->GetDataStartAddress()) {
+}
+
+HandlerTable::HandlerTable(Address instruction_start,
+ size_t handler_table_offset)
+ : number_of_entries_(0),
+#ifdef DEBUG
+ mode_(kReturnAddressBasedEncoding),
+#endif
+ raw_encoded_data_(instruction_start + handler_table_offset) {
+ if (handler_table_offset > 0) {
+ number_of_entries_ = Memory::int32_at(raw_encoded_data_);
+ raw_encoded_data_ += sizeof(int32_t);
+ }
+}
+
+int HandlerTable::GetRangeStart(int index) const {
+ DCHECK_EQ(kRangeBasedEncoding, mode_);
+ DCHECK_LT(index, NumberOfRangeEntries());
+ int offset = index * kRangeEntrySize + kRangeStartIndex;
+ return Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t));
+}
+
+int HandlerTable::GetRangeEnd(int index) const {
+ DCHECK_EQ(kRangeBasedEncoding, mode_);
+ DCHECK_LT(index, NumberOfRangeEntries());
+ int offset = index * kRangeEntrySize + kRangeEndIndex;
+ return Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t));
+}
+
+int HandlerTable::GetRangeHandler(int index) const {
+ DCHECK_EQ(kRangeBasedEncoding, mode_);
+ DCHECK_LT(index, NumberOfRangeEntries());
+ int offset = index * kRangeEntrySize + kRangeHandlerIndex;
+ return HandlerOffsetField::decode(
+ Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t)));
+}
+
+int HandlerTable::GetRangeData(int index) const {
+ DCHECK_EQ(kRangeBasedEncoding, mode_);
+ DCHECK_LT(index, NumberOfRangeEntries());
+ int offset = index * kRangeEntrySize + kRangeDataIndex;
+ return Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t));
+}
+
+HandlerTable::CatchPrediction HandlerTable::GetRangePrediction(
+ int index) const {
+ DCHECK_EQ(kRangeBasedEncoding, mode_);
+ DCHECK_LT(index, NumberOfRangeEntries());
+ int offset = index * kRangeEntrySize + kRangeHandlerIndex;
+ return HandlerPredictionField::decode(
+ Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t)));
+}
+
+int HandlerTable::GetReturnOffset(int index) const {
+ DCHECK_EQ(kReturnAddressBasedEncoding, mode_);
+ DCHECK_LT(index, NumberOfReturnEntries());
+ int offset = index * kReturnEntrySize + kReturnOffsetIndex;
+ return Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t));
+}
+
+int HandlerTable::GetReturnHandler(int index) const {
+ DCHECK_EQ(kReturnAddressBasedEncoding, mode_);
+ DCHECK_LT(index, NumberOfReturnEntries());
+ int offset = index * kReturnEntrySize + kReturnHandlerIndex;
+ return HandlerOffsetField::decode(
+ Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t)));
+}
+
+void HandlerTable::SetRangeStart(int index, int value) {
+ int offset = index * kRangeEntrySize + kRangeStartIndex;
+ Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t)) = value;
+}
+
+void HandlerTable::SetRangeEnd(int index, int value) {
+ int offset = index * kRangeEntrySize + kRangeEndIndex;
+ Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t)) = value;
+}
+
+void HandlerTable::SetRangeHandler(int index, int handler_offset,
+ CatchPrediction prediction) {
+ int value = HandlerOffsetField::encode(handler_offset) |
+ HandlerPredictionField::encode(prediction);
+ int offset = index * kRangeEntrySize + kRangeHandlerIndex;
+ Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t)) = value;
+}
+
+void HandlerTable::SetRangeData(int index, int value) {
+ int offset = index * kRangeEntrySize + kRangeDataIndex;
+ Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t)) = value;
+}
+
+// static
+int HandlerTable::LengthForRange(int entries) {
+ return entries * kRangeEntrySize * sizeof(int32_t);
+}
+
+// static
+int HandlerTable::EmitReturnTableStart(Assembler* masm, int entries) {
+ masm->DataAlign(sizeof(int32_t)); // Make sure entries are aligned.
+ masm->RecordComment(";;; Exception handler table.");
+ int table_start = masm->pc_offset();
+ masm->dd(entries);
+ return table_start;
+}
+
+// static
+void HandlerTable::EmitReturnEntry(Assembler* masm, int offset, int handler) {
+ masm->dd(offset);
+ masm->dd(HandlerOffsetField::encode(handler));
+}
+
+int HandlerTable::NumberOfRangeEntries() const {
+ DCHECK_EQ(kRangeBasedEncoding, mode_);
+ return number_of_entries_;
+}
+
+int HandlerTable::NumberOfReturnEntries() const {
+ DCHECK_EQ(kReturnAddressBasedEncoding, mode_);
+ return number_of_entries_;
+}
+
+int HandlerTable::LookupRange(int pc_offset, int* data_out,
+ CatchPrediction* prediction_out) {
+ int innermost_handler = -1;
+#ifdef DEBUG
+ // Assuming that ranges are well nested, we don't need to track the innermost
+ // offsets. This is just to verify that the table is actually well nested.
+ int innermost_start = std::numeric_limits<int>::min();
+ int innermost_end = std::numeric_limits<int>::max();
+#endif
+ for (int i = 0; i < NumberOfRangeEntries(); ++i) {
+ int start_offset = GetRangeStart(i);
+ int end_offset = GetRangeEnd(i);
+ int handler_offset = GetRangeHandler(i);
+ int handler_data = GetRangeData(i);
+ CatchPrediction prediction = GetRangePrediction(i);
+ if (pc_offset >= start_offset && pc_offset < end_offset) {
+ DCHECK_GE(start_offset, innermost_start);
+ DCHECK_LT(end_offset, innermost_end);
+ innermost_handler = handler_offset;
+#ifdef DEBUG
+ innermost_start = start_offset;
+ innermost_end = end_offset;
+#endif
+ if (data_out) *data_out = handler_data;
+ if (prediction_out) *prediction_out = prediction;
+ }
+ }
+ return innermost_handler;
+}
+
+// TODO(turbofan): Make sure table is sorted and use binary search.
+int HandlerTable::LookupReturn(int pc_offset) {
+ for (int i = 0; i < NumberOfReturnEntries(); ++i) {
+ int return_offset = GetReturnOffset(i);
+ if (pc_offset == return_offset) {
+ return GetReturnHandler(i);
+ }
+ }
+ return -1;
+}
+
+#ifdef ENABLE_DISASSEMBLER
+
+void HandlerTable::HandlerTableRangePrint(std::ostream& os) {
+ os << " from to hdlr (prediction, data)\n";
+ for (int i = 0; i < NumberOfRangeEntries(); ++i) {
+ int pc_start = GetRangeStart(i);
+ int pc_end = GetRangeEnd(i);
+ int handler_offset = GetRangeHandler(i);
+ int handler_data = GetRangeData(i);
+ CatchPrediction prediction = GetRangePrediction(i);
+ os << " (" << std::setw(4) << pc_start << "," << std::setw(4) << pc_end
+ << ") -> " << std::setw(4) << handler_offset
+ << " (prediction=" << prediction << ", data=" << handler_data << ")\n";
+ }
+}
+
+void HandlerTable::HandlerTableReturnPrint(std::ostream& os) {
+ os << " off hdlr\n";
+ for (int i = 0; i < NumberOfReturnEntries(); ++i) {
+ int pc_offset = GetReturnOffset(i);
+ int handler_offset = GetReturnHandler(i);
+ os << " " << std::setw(4) << pc_offset << " -> " << std::setw(4)
+ << handler_offset << "\n";
+ }
+}
+
+#endif // ENABLE_DISASSEMBLER
+
+} // namespace internal
+} // namespace v8
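
For illustration only, a minimal usage sketch of the new HandlerTable value type, mirroring the rewritten call sites in frames.cc above. This is not part of the commit; it assumes the usual v8::internal headers and live Code*/BytecodeArray* objects, and the two function names are placeholders.

#include "src/handler-table.h"

namespace v8 {
namespace internal {

// Return-address-based table: embedded in the Code object's instruction
// stream and keyed by the offset of the return address after a call site.
int FindReturnHandler(Code* code, Address pc) {
  HandlerTable table(code);
  int pc_offset = static_cast<int>(pc - code->InstructionStart());
  return table.LookupReturn(pc_offset);  // -1 if no entry matches.
}

// Range-based table: stored in the ByteArray attached to a BytecodeArray and
// keyed by the offset of the potentially throwing bytecode itself.
int FindRangeHandler(BytecodeArray* bytecode, int bytecode_offset,
                     int* context_register) {
  HandlerTable table(bytecode);
  HandlerTable::CatchPrediction prediction;
  return table.LookupRange(bytecode_offset, context_register, &prediction);
}

}  // namespace internal
}  // namespace v8
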
diff --git a/deps/v8/src/handler-table.h b/deps/v8/src/handler-table.h
new file mode 100644
index 0000000000..c2e282001c
--- /dev/null
+++ b/deps/v8/src/handler-table.h
@@ -0,0 +1,135 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HANDLER_TABLE_H_
+#define V8_HANDLER_TABLE_H_
+
+#include "src/assert-scope.h"
+#include "src/globals.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+class Assembler;
+class ByteArray;
+class BytecodeArray;
+
+// HandlerTable is a byte array containing entries for exception handlers in
+// the code object it is associated with. The tables come in two flavors:
+// 1) Based on ranges: Used for unoptimized code. Stored in a {ByteArray} that
+// is attached to each {BytecodeArray}. Contains one entry per exception
+// handler and a range representing the try-block covered by that handler.
+// Layout looks as follows:
+// [ range-start , range-end , handler-offset , handler-data ]
+// 2) Based on return addresses: Used for turbofanned code. Stored directly in
+// the instruction stream of the {Code} object. Contains one entry per
+// call-site that could throw an exception. Layout looks as follows:
+// [ return-address-offset , handler-offset ]
+class V8_EXPORT_PRIVATE HandlerTable {
+ public:
+ // Conservative prediction whether a given handler will locally catch an
+ // exception or cause a re-throw to outside the code boundary. Since this is
+ // undecidable it is merely an approximation (e.g. useful for debugger).
+ enum CatchPrediction {
+ UNCAUGHT, // The handler will (likely) rethrow the exception.
+ CAUGHT, // The exception will be caught by the handler.
+ PROMISE, // The exception will be caught and cause a promise rejection.
+ DESUGARING, // The exception will be caught, but both the exception and
+ // the catching are part of a desugaring and should therefore
+ // not be visible to the user (we won't notify the debugger of
+ // such exceptions).
+ ASYNC_AWAIT, // The exception will be caught and cause a promise rejection
+ // in the desugaring of an async function, so special
+ // async/await handling in the debugger can take place.
+ };
+
+ // Constructors for the various encodings.
+ explicit HandlerTable(Code* code);
+ explicit HandlerTable(ByteArray* byte_array);
+ explicit HandlerTable(BytecodeArray* bytecode_array);
+ explicit HandlerTable(Address instruction_start, size_t handler_table_offset);
+
+ // Getters for handler table based on ranges.
+ int GetRangeStart(int index) const;
+ int GetRangeEnd(int index) const;
+ int GetRangeHandler(int index) const;
+ int GetRangeData(int index) const;
+
+ // Setters for handler table based on ranges.
+ void SetRangeStart(int index, int value);
+ void SetRangeEnd(int index, int value);
+ void SetRangeHandler(int index, int offset, CatchPrediction pred);
+ void SetRangeData(int index, int value);
+
+ // Returns the required length of the underlying byte array.
+ static int LengthForRange(int entries);
+
+ // Emitters for handler table based on return addresses.
+ static int EmitReturnTableStart(Assembler* masm, int entries);
+ static void EmitReturnEntry(Assembler* masm, int offset, int handler);
+
+ // Lookup handler in a table based on ranges. The {pc_offset} is an offset to
+ // the start of the potentially throwing instruction (using return addresses
+ // for this value would be invalid).
+ int LookupRange(int pc_offset, int* data, CatchPrediction* prediction);
+
+ // Lookup handler in a table based on return addresses.
+ int LookupReturn(int pc_offset);
+
+ // Returns the number of entries in the table.
+ int NumberOfRangeEntries() const;
+ int NumberOfReturnEntries() const;
+
+#ifdef ENABLE_DISASSEMBLER
+ void HandlerTableRangePrint(std::ostream& os); // NOLINT
+ void HandlerTableReturnPrint(std::ostream& os); // NOLINT
+#endif
+
+ private:
+ enum EncodingMode { kRangeBasedEncoding, kReturnAddressBasedEncoding };
+
+ // Getters for handler table based on ranges.
+ CatchPrediction GetRangePrediction(int index) const;
+
+ // Getters for handler table based on return addresses.
+ int GetReturnOffset(int index) const;
+ int GetReturnHandler(int index) const;
+
+ // Number of entries in the loaded handler table.
+ int number_of_entries_;
+
+#ifdef DEBUG
+ // The encoding mode of the table. Mostly useful for debugging to check that
+ // used accessors and constructors fit together.
+ EncodingMode mode_;
+#endif
+
+ // Direct pointer into the encoded data. This pointer points into object on
+ // the GC heap (either {ByteArray} or {Code}) and hence would become stale
+ // during a collection. Hence we disallow any allocation.
+ Address raw_encoded_data_;
+ DisallowHeapAllocation no_gc_;
+
+ // Layout description for handler table based on ranges.
+ static const int kRangeStartIndex = 0;
+ static const int kRangeEndIndex = 1;
+ static const int kRangeHandlerIndex = 2;
+ static const int kRangeDataIndex = 3;
+ static const int kRangeEntrySize = 4;
+
+ // Layout description for handler table based on return addresses.
+ static const int kReturnOffsetIndex = 0;
+ static const int kReturnHandlerIndex = 1;
+ static const int kReturnEntrySize = 2;
+
+ // Encoding of the {handler} field.
+ class HandlerPredictionField : public BitField<CatchPrediction, 0, 3> {};
+ class HandlerOffsetField : public BitField<int, 3, 29> {};
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HANDLER_TABLE_H_
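
For illustration, a worked sketch of the range-entry encoding declared above: each entry occupies kRangeEntrySize (4) int32 words, and the handler word packs the CatchPrediction into the low 3 bits with the handler offset in the bits above it. The helper below is a hypothetical stand-in for the private HandlerOffsetField/HandlerPredictionField bit fields, not code from this commit.

// LengthForRange sizes the backing ByteArray:
//   HandlerTable::LengthForRange(3) == 3 * 4 * sizeof(int32_t) == 48 bytes.

// Hypothetical equivalent of the bit-field packing used for the handler word:
constexpr int32_t PackRangeHandler(int handler_offset, int prediction) {
  return (handler_offset << 3) | (prediction & 0x7);  // offset in bits 3..31
}
// Example: PackRangeHandler(0x40, /*CAUGHT=*/1) == 0x201.
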
diff --git a/deps/v8/src/heap-symbols.h b/deps/v8/src/heap-symbols.h
index e747ba2720..ae06892675 100644
--- a/deps/v8/src/heap-symbols.h
+++ b/deps/v8/src/heap-symbols.h
@@ -5,245 +5,258 @@
#ifndef V8_HEAP_SYMBOLS_H_
#define V8_HEAP_SYMBOLS_H_
-#define INTERNALIZED_STRING_LIST(V) \
- V(anonymous_function_string, "(anonymous function)") \
- V(anonymous_string, "anonymous") \
- V(add_string, "add") \
- V(apply_string, "apply") \
- V(arguments_string, "arguments") \
- V(Arguments_string, "Arguments") \
- V(arguments_to_string, "[object Arguments]") \
- V(Array_string, "Array") \
- V(ArrayIterator_string, "Array Iterator") \
- V(assign_string, "assign") \
- V(async_string, "async") \
- V(await_string, "await") \
- V(array_to_string, "[object Array]") \
- V(boolean_to_string, "[object Boolean]") \
- V(date_to_string, "[object Date]") \
- V(error_to_string, "[object Error]") \
- V(function_to_string, "[object Function]") \
- V(number_to_string, "[object Number]") \
- V(object_to_string, "[object Object]") \
- V(regexp_to_string, "[object RegExp]") \
- V(string_to_string, "[object String]") \
- V(bigint_string, "bigint") \
- V(BigInt_string, "BigInt") \
- V(bind_string, "bind") \
- V(boolean_string, "boolean") \
- V(Boolean_string, "Boolean") \
- V(bound__string, "bound ") \
- V(buffer_string, "buffer") \
- V(byte_length_string, "byteLength") \
- V(byte_offset_string, "byteOffset") \
- V(call_string, "call") \
- V(callee_string, "callee") \
- V(caller_string, "caller") \
- V(cell_value_string, "%cell_value") \
- V(char_at_string, "CharAt") \
- V(closure_string, "(closure)") \
- V(column_string, "column") \
- V(configurable_string, "configurable") \
- V(constructor_string, "constructor") \
- V(construct_string, "construct") \
- V(create_string, "create") \
- V(currency_string, "currency") \
- V(Date_string, "Date") \
- V(dayperiod_string, "dayperiod") \
- V(day_string, "day") \
- V(decimal_string, "decimal") \
- V(default_string, "default") \
- V(defineProperty_string, "defineProperty") \
- V(deleteProperty_string, "deleteProperty") \
- V(did_handle_string, "didHandle") \
- V(display_name_string, "displayName") \
- V(done_string, "done") \
- V(dotAll_string, "dotAll") \
- V(dot_catch_string, ".catch") \
- V(dot_for_string, ".for") \
- V(dot_generator_object_string, ".generator_object") \
- V(dot_iterator_string, ".iterator") \
- V(dot_result_string, ".result") \
- V(dot_switch_tag_string, ".switch_tag") \
- V(dot_string, ".") \
- V(exec_string, "exec") \
- V(entries_string, "entries") \
- V(enqueue_string, "enqueue") \
- V(enumerable_string, "enumerable") \
- V(era_string, "era") \
- V(Error_string, "Error") \
- V(eval_string, "eval") \
- V(EvalError_string, "EvalError") \
- V(false_string, "false") \
- V(flags_string, "flags") \
- V(fraction_string, "fraction") \
- V(function_string, "function") \
- V(Function_string, "Function") \
- V(Generator_string, "Generator") \
- V(getOwnPropertyDescriptor_string, "getOwnPropertyDescriptor") \
- V(getOwnPropertyDescriptors_string, "getOwnPropertyDescriptors") \
- V(getPrototypeOf_string, "getPrototypeOf") \
- V(get_string, "get") \
- V(get_space_string, "get ") \
- V(global_string, "global") \
- V(group_string, "group") \
- V(groups_string, "groups") \
- V(has_string, "has") \
- V(hour_string, "hour") \
- V(ignoreCase_string, "ignoreCase") \
- V(illegal_access_string, "illegal access") \
- V(illegal_argument_string, "illegal argument") \
- V(index_string, "index") \
- V(infinity_string, "infinity") \
- V(Infinity_string, "Infinity") \
- V(integer_string, "integer") \
- V(input_string, "input") \
- V(isExtensible_string, "isExtensible") \
- V(isView_string, "isView") \
- V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic") \
- V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \
- V(keys_string, "keys") \
- V(lastIndex_string, "lastIndex") \
- V(length_string, "length") \
- V(let_string, "let") \
- V(line_string, "line") \
- V(literal_string, "literal") \
- V(Map_string, "Map") \
- V(message_string, "message") \
- V(minus_Infinity_string, "-Infinity") \
- V(minus_zero_string, "-0") \
- V(minusSign_string, "minusSign") \
- V(minute_string, "minute") \
- V(Module_string, "Module") \
- V(month_string, "month") \
- V(multiline_string, "multiline") \
- V(name_string, "name") \
- V(native_string, "native") \
- V(nan_string, "nan") \
- V(NaN_string, "NaN") \
- V(new_target_string, ".new.target") \
- V(next_string, "next") \
- V(NFC_string, "NFC") \
- V(NFD_string, "NFD") \
- V(NFKC_string, "NFKC") \
- V(NFKD_string, "NFKD") \
- V(not_equal, "not-equal") \
- V(null_string, "null") \
- V(null_to_string, "[object Null]") \
- V(number_string, "number") \
- V(Number_string, "Number") \
- V(object_string, "object") \
- V(Object_string, "Object") \
- V(ok, "ok") \
- V(one_string, "1") \
- V(ownKeys_string, "ownKeys") \
- V(percentSign_string, "percentSign") \
- V(plusSign_string, "plusSign") \
- V(position_string, "position") \
- V(preventExtensions_string, "preventExtensions") \
- V(Promise_string, "Promise") \
- V(PromiseResolveThenableJob_string, "PromiseResolveThenableJob") \
- V(promise_string, "promise") \
- V(proto_string, "__proto__") \
- V(prototype_string, "prototype") \
- V(proxy_string, "proxy") \
- V(Proxy_string, "Proxy") \
- V(query_colon_string, "(?:)") \
- V(RangeError_string, "RangeError") \
- V(raw_string, "raw") \
- V(ReferenceError_string, "ReferenceError") \
- V(RegExp_string, "RegExp") \
- V(reject_string, "reject") \
- V(resolve_string, "resolve") \
- V(return_string, "return") \
- V(revoke_string, "revoke") \
- V(script_string, "script") \
- V(second_string, "second") \
- V(setPrototypeOf_string, "setPrototypeOf") \
- V(set_space_string, "set ") \
- V(set_string, "set") \
- V(Set_string, "Set") \
- V(source_string, "source") \
- V(sourceText_string, "sourceText") \
- V(stack_string, "stack") \
- V(stackTraceLimit_string, "stackTraceLimit") \
- V(star_default_star_string, "*default*") \
- V(sticky_string, "sticky") \
- V(string_string, "string") \
- V(String_string, "String") \
- V(symbol_string, "symbol") \
- V(Symbol_string, "Symbol") \
- V(symbol_species_string, "[Symbol.species]") \
- V(SyntaxError_string, "SyntaxError") \
- V(then_string, "then") \
- V(this_function_string, ".this_function") \
- V(this_string, "this") \
- V(throw_string, "throw") \
- V(timed_out, "timed-out") \
- V(timeZoneName_string, "timeZoneName") \
- V(toJSON_string, "toJSON") \
- V(toString_string, "toString") \
- V(true_string, "true") \
- V(TypeError_string, "TypeError") \
- V(type_string, "type") \
- V(CompileError_string, "CompileError") \
- V(LinkError_string, "LinkError") \
- V(RuntimeError_string, "RuntimeError") \
- V(undefined_string, "undefined") \
- V(undefined_to_string, "[object Undefined]") \
- V(unicode_string, "unicode") \
- V(use_asm_string, "use asm") \
- V(use_strict_string, "use strict") \
- V(URIError_string, "URIError") \
- V(valueOf_string, "valueOf") \
- V(values_string, "values") \
- V(value_string, "value") \
- V(WeakMap_string, "WeakMap") \
- V(WeakSet_string, "WeakSet") \
- V(weekday_string, "weekday") \
- V(will_handle_string, "willHandle") \
- V(writable_string, "writable") \
- V(year_string, "year") \
+#define INTERNALIZED_STRING_LIST(V) \
+ V(add_string, "add") \
+ V(anonymous_function_string, "(anonymous function)") \
+ V(anonymous_string, "anonymous") \
+ V(apply_string, "apply") \
+ V(Arguments_string, "Arguments") \
+ V(arguments_string, "arguments") \
+ V(arguments_to_string, "[object Arguments]") \
+ V(Array_string, "Array") \
+ V(array_to_string, "[object Array]") \
+ V(ArrayBuffer_string, "ArrayBuffer") \
+ V(ArrayIterator_string, "Array Iterator") \
+ V(assign_string, "assign") \
+ V(async_string, "async") \
+ V(await_string, "await") \
+ V(BigInt_string, "BigInt") \
+ V(bigint_string, "bigint") \
+ V(BigInt64Array_string, "BigInt64Array") \
+ V(BigUint64Array_string, "BigUint64Array") \
+ V(bind_string, "bind") \
+ V(Boolean_string, "Boolean") \
+ V(boolean_string, "boolean") \
+ V(boolean_to_string, "[object Boolean]") \
+ V(bound__string, "bound ") \
+ V(buffer_string, "buffer") \
+ V(byte_length_string, "byteLength") \
+ V(byte_offset_string, "byteOffset") \
+ V(call_string, "call") \
+ V(callee_string, "callee") \
+ V(caller_string, "caller") \
+ V(cell_value_string, "%cell_value") \
+ V(char_at_string, "CharAt") \
+ V(closure_string, "(closure)") \
+ V(column_string, "column") \
+ V(CompileError_string, "CompileError") \
+ V(configurable_string, "configurable") \
+ V(construct_string, "construct") \
+ V(constructor_string, "constructor") \
+ V(create_string, "create") \
+ V(currency_string, "currency") \
+ V(Date_string, "Date") \
+ V(date_to_string, "[object Date]") \
+ V(day_string, "day") \
+ V(dayperiod_string, "dayperiod") \
+ V(decimal_string, "decimal") \
+ V(default_string, "default") \
+ V(defineProperty_string, "defineProperty") \
+ V(deleteProperty_string, "deleteProperty") \
+ V(did_handle_string, "didHandle") \
+ V(display_name_string, "displayName") \
+ V(done_string, "done") \
+ V(dot_catch_string, ".catch") \
+ V(dot_for_string, ".for") \
+ V(dot_generator_object_string, ".generator_object") \
+ V(dot_iterator_string, ".iterator") \
+ V(dot_result_string, ".result") \
+ V(dot_string, ".") \
+ V(dot_switch_tag_string, ".switch_tag") \
+ V(dotAll_string, "dotAll") \
+ V(enqueue_string, "enqueue") \
+ V(entries_string, "entries") \
+ V(enumerable_string, "enumerable") \
+ V(era_string, "era") \
+ V(Error_string, "Error") \
+ V(error_to_string, "[object Error]") \
+ V(eval_string, "eval") \
+ V(EvalError_string, "EvalError") \
+ V(exec_string, "exec") \
+ V(false_string, "false") \
+ V(flags_string, "flags") \
+ V(Float32Array_string, "Float32Array") \
+ V(Float64Array_string, "Float64Array") \
+ V(fraction_string, "fraction") \
+ V(Function_string, "Function") \
+ V(function_native_code_string, "function () { [native code] }") \
+ V(function_string, "function") \
+ V(function_to_string, "[object Function]") \
+ V(Generator_string, "Generator") \
+ V(get_space_string, "get ") \
+ V(get_string, "get") \
+ V(getOwnPropertyDescriptor_string, "getOwnPropertyDescriptor") \
+ V(getOwnPropertyDescriptors_string, "getOwnPropertyDescriptors") \
+ V(getPrototypeOf_string, "getPrototypeOf") \
+ V(global_string, "global") \
+ V(group_string, "group") \
+ V(groups_string, "groups") \
+ V(has_string, "has") \
+ V(hour_string, "hour") \
+ V(ignoreCase_string, "ignoreCase") \
+ V(illegal_access_string, "illegal access") \
+ V(illegal_argument_string, "illegal argument") \
+ V(index_string, "index") \
+ V(Infinity_string, "Infinity") \
+ V(infinity_string, "infinity") \
+ V(input_string, "input") \
+ V(Int16Array_string, "Int16Array") \
+ V(Int32Array_string, "Int32Array") \
+ V(Int8Array_string, "Int8Array") \
+ V(integer_string, "integer") \
+ V(isExtensible_string, "isExtensible") \
+ V(isView_string, "isView") \
+ V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic") \
+ V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \
+ V(keys_string, "keys") \
+ V(lastIndex_string, "lastIndex") \
+ V(length_string, "length") \
+ V(let_string, "let") \
+ V(line_string, "line") \
+ V(LinkError_string, "LinkError") \
+ V(literal_string, "literal") \
+ V(Map_string, "Map") \
+ V(MapIterator_string, "Map Iterator") \
+ V(message_string, "message") \
+ V(minus_Infinity_string, "-Infinity") \
+ V(minus_zero_string, "-0") \
+ V(minusSign_string, "minusSign") \
+ V(minute_string, "minute") \
+ V(Module_string, "Module") \
+ V(month_string, "month") \
+ V(multiline_string, "multiline") \
+ V(name_string, "name") \
+ V(NaN_string, "NaN") \
+ V(nan_string, "nan") \
+ V(native_string, "native") \
+ V(new_target_string, ".new.target") \
+ V(next_string, "next") \
+ V(NFC_string, "NFC") \
+ V(NFD_string, "NFD") \
+ V(NFKC_string, "NFKC") \
+ V(NFKD_string, "NFKD") \
+ V(not_equal, "not-equal") \
+ V(null_string, "null") \
+ V(null_to_string, "[object Null]") \
+ V(Number_string, "Number") \
+ V(number_string, "number") \
+ V(number_to_string, "[object Number]") \
+ V(Object_string, "Object") \
+ V(object_string, "object") \
+ V(object_to_string, "[object Object]") \
+ V(ok, "ok") \
+ V(one_string, "1") \
+ V(ownKeys_string, "ownKeys") \
+ V(percentSign_string, "percentSign") \
+ V(plusSign_string, "plusSign") \
+ V(position_string, "position") \
+ V(preventExtensions_string, "preventExtensions") \
+ V(Promise_string, "Promise") \
+ V(promise_string, "promise") \
+ V(PromiseResolveThenableJob_string, "PromiseResolveThenableJob") \
+ V(proto_string, "__proto__") \
+ V(prototype_string, "prototype") \
+ V(proxy_string, "proxy") \
+ V(Proxy_string, "Proxy") \
+ V(query_colon_string, "(?:)") \
+ V(RangeError_string, "RangeError") \
+ V(raw_string, "raw") \
+ V(ReferenceError_string, "ReferenceError") \
+ V(RegExp_string, "RegExp") \
+ V(regexp_to_string, "[object RegExp]") \
+ V(reject_string, "reject") \
+ V(resolve_string, "resolve") \
+ V(return_string, "return") \
+ V(revoke_string, "revoke") \
+ V(RuntimeError_string, "RuntimeError") \
+ V(Script_string, "Script") \
+ V(script_string, "script") \
+ V(second_string, "second") \
+ V(set_space_string, "set ") \
+ V(Set_string, "Set") \
+ V(set_string, "set") \
+ V(SetIterator_string, "Set Iterator") \
+ V(setPrototypeOf_string, "setPrototypeOf") \
+ V(SharedArrayBuffer_string, "SharedArrayBuffer") \
+ V(source_string, "source") \
+ V(sourceText_string, "sourceText") \
+ V(stack_string, "stack") \
+ V(stackTraceLimit_string, "stackTraceLimit") \
+ V(star_default_star_string, "*default*") \
+ V(sticky_string, "sticky") \
+ V(String_string, "String") \
+ V(string_string, "string") \
+ V(string_to_string, "[object String]") \
+ V(symbol_species_string, "[Symbol.species]") \
+ V(Symbol_string, "Symbol") \
+ V(symbol_string, "symbol") \
+ V(SyntaxError_string, "SyntaxError") \
+ V(then_string, "then") \
+ V(this_function_string, ".this_function") \
+ V(this_string, "this") \
+ V(throw_string, "throw") \
+ V(timed_out, "timed-out") \
+ V(timeZoneName_string, "timeZoneName") \
+ V(toJSON_string, "toJSON") \
+ V(toString_string, "toString") \
+ V(true_string, "true") \
+ V(type_string, "type") \
+ V(TypeError_string, "TypeError") \
+ V(Uint16Array_string, "Uint16Array") \
+ V(Uint32Array_string, "Uint32Array") \
+ V(Uint8Array_string, "Uint8Array") \
+ V(Uint8ClampedArray_string, "Uint8ClampedArray") \
+ V(undefined_string, "undefined") \
+ V(undefined_to_string, "[object Undefined]") \
+ V(unicode_string, "unicode") \
+ V(URIError_string, "URIError") \
+ V(use_asm_string, "use asm") \
+ V(use_strict_string, "use strict") \
+ V(value_string, "value") \
+ V(valueOf_string, "valueOf") \
+ V(values_string, "values") \
+ V(WeakMap_string, "WeakMap") \
+ V(WeakSet_string, "WeakSet") \
+ V(weekday_string, "weekday") \
+ V(will_handle_string, "willHandle") \
+ V(writable_string, "writable") \
+ V(year_string, "year") \
V(zero_string, "0")
-#define PRIVATE_SYMBOL_LIST(V) \
- V(array_iteration_kind_symbol) \
- V(array_iterator_next_symbol) \
- V(array_iterator_object_symbol) \
- V(call_site_frame_array_symbol) \
- V(call_site_frame_index_symbol) \
- V(console_context_id_symbol) \
- V(console_context_name_symbol) \
- V(class_fields_symbol) \
- V(class_positions_symbol) \
- V(detailed_stack_trace_symbol) \
- V(elements_transition_symbol) \
- V(error_end_pos_symbol) \
- V(error_script_symbol) \
- V(error_start_pos_symbol) \
- V(frozen_symbol) \
- V(generic_symbol) \
- V(home_object_symbol) \
- V(intl_initialized_marker_symbol) \
- V(intl_pattern_symbol) \
- V(intl_resolved_symbol) \
- V(megamorphic_symbol) \
- V(native_context_index_symbol) \
- V(nonextensible_symbol) \
- V(not_mapped_symbol) \
- V(premonomorphic_symbol) \
- V(promise_async_stack_id_symbol) \
- V(promise_debug_marker_symbol) \
- V(promise_forwarding_handler_symbol) \
- V(promise_handled_by_symbol) \
- V(promise_async_id_symbol) \
- V(promise_default_resolve_handler_symbol) \
- V(promise_default_reject_handler_symbol) \
- V(sealed_symbol) \
- V(stack_trace_symbol) \
- V(strict_function_transition_symbol) \
- V(wasm_function_index_symbol) \
- V(wasm_instance_symbol) \
+#define PRIVATE_SYMBOL_LIST(V) \
+ V(call_site_frame_array_symbol) \
+ V(call_site_frame_index_symbol) \
+ V(console_context_id_symbol) \
+ V(console_context_name_symbol) \
+ V(class_fields_symbol) \
+ V(class_positions_symbol) \
+ V(detailed_stack_trace_symbol) \
+ V(elements_transition_symbol) \
+ V(error_end_pos_symbol) \
+ V(error_script_symbol) \
+ V(error_start_pos_symbol) \
+ V(frozen_symbol) \
+ V(generator_outer_promise_symbol) \
+ V(generic_symbol) \
+ V(home_object_symbol) \
+ V(intl_initialized_marker_symbol) \
+ V(intl_pattern_symbol) \
+ V(intl_resolved_symbol) \
+ V(megamorphic_symbol) \
+ V(native_context_index_symbol) \
+ V(nonextensible_symbol) \
+ V(not_mapped_symbol) \
+ V(premonomorphic_symbol) \
+ V(promise_async_stack_id_symbol) \
+ V(promise_debug_marker_symbol) \
+ V(promise_forwarding_handler_symbol) \
+ V(promise_handled_by_symbol) \
+ V(promise_async_id_symbol) \
+ V(sealed_symbol) \
+ V(stack_trace_symbol) \
+ V(strict_function_transition_symbol) \
+ V(wasm_function_index_symbol) \
+ V(wasm_instance_symbol) \
V(uninitialized_symbol)
#define PUBLIC_SYMBOL_LIST(V) \
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index 44ab099ba8..3aafd191cc 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -424,18 +424,11 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
shared_(shared),
bailout_(bailout),
on_hold_(on_hold),
- weak_objects_(weak_objects),
- total_marked_bytes_(0),
- pending_task_count_(0),
- task_count_(0) {
+ weak_objects_(weak_objects) {
// The runtime flag should be set only if the compile time flag was set.
#ifndef V8_CONCURRENT_MARKING
CHECK(!FLAG_concurrent_marking);
#endif
- for (int i = 0; i <= kMaxTasks; i++) {
- is_pending_[i] = false;
- task_state_[i].marked_bytes = 0;
- }
}
void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
@@ -443,13 +436,8 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
size_t kBytesUntilInterruptCheck = 64 * KB;
int kObjectsUntilInterrupCheck = 1000;
- LiveBytesMap* live_bytes = nullptr;
- {
- base::LockGuard<base::Mutex> guard(&task_state->lock);
- live_bytes = &task_state->live_bytes;
- }
- ConcurrentMarkingVisitor visitor(shared_, bailout_, live_bytes, weak_objects_,
- task_id);
+ ConcurrentMarkingVisitor visitor(shared_, bailout_, &task_state->live_bytes,
+ weak_objects_, task_id);
double time_ms;
size_t marked_bytes = 0;
if (FLAG_trace_concurrent_marking) {
@@ -458,9 +446,9 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
}
{
TimedScope scope(&time_ms);
+
bool done = false;
while (!done) {
- base::LockGuard<base::Mutex> guard(&task_state->lock);
size_t current_marked_bytes = 0;
int objects_processed = 0;
while (current_marked_bytes < kBytesUntilInterruptCheck &&
@@ -484,17 +472,16 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
marked_bytes += current_marked_bytes;
base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
marked_bytes);
- if (task_state->interrupt_request.Value()) {
- task_state->interrupt_condition.Wait(&task_state->lock);
+ if (task_state->preemption_request.Value()) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "ConcurrentMarking::Run Preempted");
+ break;
}
}
- {
- // Take the lock to synchronize with worklist update after
- // young generation GC.
- base::LockGuard<base::Mutex> guard(&task_state->lock);
- bailout_->FlushToGlobal(task_id);
- on_hold_->FlushToGlobal(task_id);
- }
+ shared_->FlushToGlobal(task_id);
+ bailout_->FlushToGlobal(task_id);
+ on_hold_->FlushToGlobal(task_id);
+
weak_objects_->weak_cells.FlushToGlobal(task_id);
weak_objects_->transition_arrays.FlushToGlobal(task_id);
base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
@@ -517,21 +504,21 @@ void ConcurrentMarking::ScheduleTasks() {
DCHECK(heap_->use_tasks());
if (!FLAG_concurrent_marking) return;
base::LockGuard<base::Mutex> guard(&pending_lock_);
+ DCHECK_EQ(0, pending_task_count_);
if (task_count_ == 0) {
- // TODO(ulan): Increase the number of tasks for platforms that benefit
- // from it.
- task_count_ = static_cast<int>(
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads() / 2);
- task_count_ = Max(Min(task_count_, kMaxTasks), 1);
+ task_count_ = Max(
+ 1, Min(kMaxTasks,
+ static_cast<int>(V8::GetCurrentPlatform()
+ ->NumberOfAvailableBackgroundThreads())));
}
// Task id 0 is for the main thread.
- for (int i = 1; i <= task_count_ && pending_task_count_ < task_count_; i++) {
+ for (int i = 1; i <= task_count_; i++) {
if (!is_pending_[i]) {
if (FLAG_trace_concurrent_marking) {
heap_->isolate()->PrintWithTimestamp(
"Scheduling concurrent marking task %d\n", i);
}
- task_state_[i].interrupt_request.SetValue(false);
+ task_state_[i].preemption_request.SetValue(false);
is_pending_[i] = true;
++pending_task_count_;
Task* task = new Task(heap_->isolate(), this, &task_state_[i], i);
@@ -540,6 +527,7 @@ void ConcurrentMarking::ScheduleTasks() {
task, v8::Platform::kShortRunningTask);
}
}
+ DCHECK_EQ(task_count_, pending_task_count_);
}
void ConcurrentMarking::RescheduleTasksIfNeeded() {
@@ -553,25 +541,24 @@ void ConcurrentMarking::RescheduleTasksIfNeeded() {
}
}
-void ConcurrentMarking::WaitForTasks() {
- if (!FLAG_concurrent_marking) return;
+bool ConcurrentMarking::Stop(StopRequest stop_request) {
+ if (!FLAG_concurrent_marking) return false;
base::LockGuard<base::Mutex> guard(&pending_lock_);
- while (pending_task_count_ > 0) {
- pending_condition_.Wait(&pending_lock_);
- }
-}
-void ConcurrentMarking::EnsureCompleted() {
- if (!FLAG_concurrent_marking) return;
- base::LockGuard<base::Mutex> guard(&pending_lock_);
- CancelableTaskManager* task_manager =
- heap_->isolate()->cancelable_task_manager();
- for (int i = 1; i <= task_count_; i++) {
- if (is_pending_[i]) {
- if (task_manager->TryAbort(cancelable_id_[i]) ==
- CancelableTaskManager::kTaskAborted) {
- is_pending_[i] = false;
- --pending_task_count_;
+ if (pending_task_count_ == 0) return false;
+
+ if (stop_request != StopRequest::COMPLETE_TASKS_FOR_TESTING) {
+ CancelableTaskManager* task_manager =
+ heap_->isolate()->cancelable_task_manager();
+ for (int i = 1; i <= task_count_; i++) {
+ if (is_pending_[i]) {
+ if (task_manager->TryAbort(cancelable_id_[i]) ==
+ CancelableTaskManager::kTaskAborted) {
+ is_pending_[i] = false;
+ --pending_task_count_;
+ } else if (stop_request == StopRequest::PREEMPT_TASKS) {
+ task_state_[i].preemption_request.SetValue(true);
+ }
}
}
}
@@ -581,6 +568,7 @@ void ConcurrentMarking::EnsureCompleted() {
for (int i = 1; i <= task_count_; i++) {
DCHECK(!is_pending_[i]);
}
+ return true;
}
void ConcurrentMarking::FlushLiveBytes(
@@ -620,25 +608,14 @@ size_t ConcurrentMarking::TotalMarkedBytes() {
}
ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
- : concurrent_marking_(concurrent_marking) {
- if (!FLAG_concurrent_marking) return;
- // Request task_state for all tasks.
- for (int i = 1; i <= kMaxTasks; i++) {
- concurrent_marking_->task_state_[i].interrupt_request.SetValue(true);
- }
- // Now take a lock to ensure that the tasks are waiting.
- for (int i = 1; i <= kMaxTasks; i++) {
- concurrent_marking_->task_state_[i].lock.Lock();
- }
+ : concurrent_marking_(concurrent_marking),
+ resume_on_exit_(concurrent_marking_->Stop(
+ ConcurrentMarking::StopRequest::PREEMPT_TASKS)) {
+ DCHECK_IMPLIES(resume_on_exit_, FLAG_concurrent_marking);
}
ConcurrentMarking::PauseScope::~PauseScope() {
- if (!FLAG_concurrent_marking) return;
- for (int i = kMaxTasks; i >= 1; i--) {
- concurrent_marking_->task_state_[i].interrupt_request.SetValue(false);
- concurrent_marking_->task_state_[i].interrupt_condition.NotifyAll();
- concurrent_marking_->task_state_[i].lock.Unlock();
- }
+ if (resume_on_exit_) concurrent_marking_->RescheduleTasksIfNeeded();
}
} // namespace internal
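The rewritten ConcurrentMarking::Run() above replaces the per-task lock and condition variable with a single atomic flag: each worker marks in small batches, publishes its byte count, and leaves its loop as soon as the main thread has set preemption_request, flushing its local worklists before returning. A minimal standalone sketch of that shape, using standard-library primitives instead of V8's base::AtomicValue and Worklist (the names below are illustrative, not V8's):

  #include <atomic>
  #include <chrono>
  #include <cstddef>
  #include <cstdio>
  #include <thread>

  // Hypothetical stand-in for V8's per-task state.
  struct TaskState {
    std::atomic<bool> preemption_request{false};
    std::atomic<std::size_t> marked_bytes{0};
  };

  // Worker loop: mark in small batches, publish progress, and re-check the
  // preemption flag between batches (the same shape as the batching around
  // kBytesUntilInterruptCheck in Run()).
  void MarkerTask(TaskState* state) {
    std::size_t marked = 0;
    bool done = false;
    while (!done) {
      // ... pretend to drain roughly 64KB of objects from a local worklist ...
      std::this_thread::sleep_for(std::chrono::microseconds(200));
      marked += 64 * 1024;
      state->marked_bytes.store(marked, std::memory_order_relaxed);
      if (marked >= 1024 * 1024) done = true;  // pretend the worklist drained
      if (state->preemption_request.load(std::memory_order_relaxed)) {
        break;  // preempted: stop now, leave the remaining work for later
      }
    }
    // A real task would flush its local worklists to the global ones here.
  }

  int main() {
    TaskState state;
    std::thread worker(MarkerTask, &state);
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
    // The main thread preempts, like PauseScope / Stop(PREEMPT_TASKS) do.
    state.preemption_request.store(true, std::memory_order_relaxed);
    worker.join();
    std::printf("marked %zu bytes before stopping\n", state.marked_bytes.load());
    return 0;
  }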
diff --git a/deps/v8/src/heap/concurrent-marking.h b/deps/v8/src/heap/concurrent-marking.h
index 0f0c8bf992..c5af406e45 100644
--- a/deps/v8/src/heap/concurrent-marking.h
+++ b/deps/v8/src/heap/concurrent-marking.h
@@ -2,10 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HEAP_CONCURRENT_MARKING_
-#define V8_HEAP_CONCURRENT_MARKING_
+#ifndef V8_HEAP_CONCURRENT_MARKING_H_
+#define V8_HEAP_CONCURRENT_MARKING_H_
+#include "include/v8-platform.h"
#include "src/allocation.h"
+#include "src/base/atomic-utils.h"
+#include "src/base/platform/condition-variable.h"
+#include "src/base/platform/mutex.h"
#include "src/cancelable-task.h"
#include "src/heap/spaces.h"
#include "src/heap/worklist.h"
@@ -26,26 +30,48 @@ using LiveBytesMap =
class ConcurrentMarking {
public:
// When the scope is entered, the concurrent marking tasks
- // are paused and are not looking at the heap objects.
+ // are preempted and are not looking at the heap objects; concurrent marking
+ // is resumed when the scope is exited.
class PauseScope {
public:
explicit PauseScope(ConcurrentMarking* concurrent_marking);
~PauseScope();
private:
- ConcurrentMarking* concurrent_marking_;
+ ConcurrentMarking* const concurrent_marking_;
+ const bool resume_on_exit_;
};
- static const int kMaxTasks = 4;
+ enum class StopRequest {
+ // Preempt ongoing tasks ASAP (and cancel unstarted tasks).
+ PREEMPT_TASKS,
+ // Wait for ongoing tasks to complete (and cancel unstarted tasks).
+ COMPLETE_ONGOING_TASKS,
+ // Wait for all scheduled tasks to complete (only use this in tests that
+ // control the full stack -- otherwise tasks cancelled by the platform can
+ // make this call hang).
+ COMPLETE_TASKS_FOR_TESTING,
+ };
+
+ // TODO(gab): The only thing that prevents this being above 7 is
+ // Worklist::kMaxNumTasks being maxed at 8 (concurrent marking doesn't use
+ // task 0, reserved for the main thread).
+ static constexpr int kMaxTasks = 7;
using MarkingWorklist = Worklist<HeapObject*, 64 /* segment size */>;
ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
MarkingWorklist* bailout, MarkingWorklist* on_hold,
WeakObjects* weak_objects);
+ // Schedules asynchronous tasks to perform concurrent marking. Objects in the
+ // heap should not be moved while these are active (can be stopped safely via
+ // Stop() or PauseScope).
void ScheduleTasks();
- void WaitForTasks();
- void EnsureCompleted();
+
+ // Stops concurrent marking per |stop_request|'s semantics. Returns true
+ // if concurrent marking was in progress, false otherwise.
+ bool Stop(StopRequest stop_request);
+
void RescheduleTasksIfNeeded();
// Flushes the local live bytes into the given marking state.
void FlushLiveBytes(MajorNonAtomicMarkingState* marking_state);
@@ -59,37 +85,32 @@ class ConcurrentMarking {
private:
struct TaskState {
- // When the concurrent marking task has this lock, then objects in the
- // heap are guaranteed to not move.
- base::Mutex lock;
- // The main thread sets this flag to true, when it wants the concurrent
- // maker to give up the lock.
- base::AtomicValue<bool> interrupt_request;
- // The concurrent marker waits on this condition until the request
- // flag is cleared by the main thread.
- base::ConditionVariable interrupt_condition;
+ // The main thread sets this flag to true when it wants the concurrent
+ // marker to give up the worker thread.
+ base::AtomicValue<bool> preemption_request;
+
LiveBytesMap live_bytes;
- size_t marked_bytes;
+ size_t marked_bytes = 0;
char cache_line_padding[64];
};
class Task;
void Run(int task_id, TaskState* task_state);
- Heap* heap_;
- MarkingWorklist* shared_;
- MarkingWorklist* bailout_;
- MarkingWorklist* on_hold_;
- WeakObjects* weak_objects_;
+ Heap* const heap_;
+ MarkingWorklist* const shared_;
+ MarkingWorklist* const bailout_;
+ MarkingWorklist* const on_hold_;
+ WeakObjects* const weak_objects_;
TaskState task_state_[kMaxTasks + 1];
- base::AtomicNumber<size_t> total_marked_bytes_;
+ base::AtomicNumber<size_t> total_marked_bytes_{0};
base::Mutex pending_lock_;
base::ConditionVariable pending_condition_;
- int pending_task_count_;
- bool is_pending_[kMaxTasks + 1];
- CancelableTaskManager::Id cancelable_id_[kMaxTasks + 1];
- int task_count_;
+ int pending_task_count_ = 0;
+ bool is_pending_[kMaxTasks + 1] = {};
+ CancelableTaskManager::Id cancelable_id_[kMaxTasks + 1] = {};
+ int task_count_ = 0;
};
} // namespace internal
} // namespace v8
-#endif // V8_HEAP_PAGE_PARALLEL_JOB_
+#endif // V8_HEAP_CONCURRENT_MARKING_H_
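On the caller side, the header above folds WaitForTasks() and EnsureCompleted() into a single Stop(StopRequest), and PauseScope becomes a thin RAII wrapper around the preempting variant: the constructor records whether any tasks were actually running, and the destructor reschedules only in that case. A stripped-down, self-contained illustration of that resume_on_exit_ pattern (the Marker class is a made-up stand-in, not V8's ConcurrentMarking):

  #include <cstdio>

  // Made-up stand-in for the concurrent marker.
  class Marker {
   public:
    void Start() { running_ = true; }
    void Reschedule() { running_ = true; }
    // Returns true if tasks were actually running, mirroring the new
    // ConcurrentMarking::Stop() contract.
    bool Stop() {
      bool was_running = running_;
      running_ = false;
      return was_running;
    }
    bool running() const { return running_; }

   private:
    bool running_ = false;
  };

  class PauseScope {
   public:
    explicit PauseScope(Marker* marker)
        : marker_(marker), resume_on_exit_(marker->Stop()) {}
    ~PauseScope() {
      // Resume only if the constructor actually stopped something.
      if (resume_on_exit_) marker_->Reschedule();
    }

   private:
    Marker* const marker_;
    const bool resume_on_exit_;
  };

  int main() {
    Marker marker;
    marker.Start();
    {
      PauseScope pause(&marker);  // marking is stopped inside this scope
      std::printf("inside scope, running = %d\n", marker.running());
    }  // destructor reschedules because marking had been running
    std::printf("after scope, running = %d\n", marker.running());
    return 0;
  }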
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index f4e5c1fe13..41af95fa44 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -626,6 +626,7 @@ CodeSpaceMemoryModificationScope::CodeSpaceMemoryModificationScope(Heap* heap)
LargePage* page = heap_->lo_space()->first_page();
while (page != nullptr) {
if (page->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
+ CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
page->SetReadAndWritable();
}
page = page->next_page();
@@ -640,6 +641,7 @@ CodeSpaceMemoryModificationScope::~CodeSpaceMemoryModificationScope() {
LargePage* page = heap_->lo_space()->first_page();
while (page != nullptr) {
if (page->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
+ CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
page->SetReadAndExecutable();
}
page = page->next_page();
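Both hunks in heap-inl.h above tighten CodeSpaceMemoryModificationScope: the constructor flips executable large-object pages to read/write, the destructor flips them back, and the new CHECK asserts that each chunk really was registered as executable before its protection is changed. A rough standalone sketch of that W^X toggling idea on a single mmap'd page (POSIX/Linux-only, and entirely separate from V8's MemoryChunk machinery):

  #include <sys/mman.h>

  #include <cstddef>
  #include <cstdio>
  #include <cstring>

  // RAII scope: page becomes read/write on entry, read/execute again on exit.
  class PageWriteScope {
   public:
    PageWriteScope(void* page, std::size_t size) : page_(page), size_(size) {
      if (mprotect(page_, size_, PROT_READ | PROT_WRITE) != 0) std::perror("mprotect");
    }
    ~PageWriteScope() {
      if (mprotect(page_, size_, PROT_READ | PROT_EXEC) != 0) std::perror("mprotect");
    }

   private:
    void* const page_;
    const std::size_t size_;
  };

  int main() {
    const std::size_t kPageSize = 4096;
    void* page = mmap(nullptr, kPageSize, PROT_READ | PROT_EXEC,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (page == MAP_FAILED) {
      std::perror("mmap");
      return 1;
    }
    {
      PageWriteScope scope(page, kPageSize);  // page is writable here
      std::memset(page, 0xC3, 1);             // e.g. patch in a single RET byte
    }                                         // back to read/execute here
    std::printf("first byte: 0x%02x\n", static_cast<unsigned char*>(page)[0]);
    munmap(page, kPageSize);
    return 0;
  }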
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 7f965602b8..9a83c0d172 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -17,7 +17,6 @@
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
-#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/conversions.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
@@ -46,6 +45,7 @@
#include "src/heap/stress-marking-observer.h"
#include "src/heap/stress-scavenge-observer.h"
#include "src/heap/sweeper.h"
+#include "src/instruction-stream.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/data-handler.h"
#include "src/objects/shared-function-info.h"
@@ -56,6 +56,7 @@
#include "src/snapshot/snapshot.h"
#include "src/tracing/trace-event.h"
#include "src/trap-handler/trap-handler.h"
+#include "src/unicode-decoder.h"
#include "src/unicode-inl.h"
#include "src/utils-inl.h"
#include "src/utils.h"
@@ -177,6 +178,7 @@ Heap::Heap()
raw_allocations_hash_(0),
stress_marking_observer_(nullptr),
stress_scavenge_observer_(nullptr),
+ allocation_step_in_progress_(false),
max_marking_limit_reached_(0.0),
ms_count_(0),
gc_count_(0),
@@ -461,30 +463,6 @@ bool Heap::IsRetainingPathTarget(HeapObject* object,
return false;
}
-namespace {
-const char* RootToString(Root root) {
- switch (root) {
-#define ROOT_CASE(root_id, ignore, description) \
- case Root::root_id: \
- return description;
- ROOT_ID_LIST(ROOT_CASE)
-#undef ROOT_CASE
- case Root::kCodeFlusher:
- return "(Code flusher)";
- case Root::kPartialSnapshotCache:
- return "(Partial snapshot cache)";
- case Root::kWeakCollections:
- return "(Weak collections)";
- case Root::kWrapperTracing:
- return "(Wrapper tracing)";
- case Root::kUnknown:
- return "(Unknown)";
- }
- UNREACHABLE();
- return nullptr;
-}
-} // namespace
-
void Heap::PrintRetainingPath(HeapObject* target, RetainingPathOption option) {
PrintF("\n\n\n");
PrintF("#################################################\n");
@@ -527,7 +505,7 @@ void Heap::PrintRetainingPath(HeapObject* target, RetainingPathOption option) {
}
PrintF("\n");
PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
- PrintF("Root: %s\n", RootToString(root));
+ PrintF("Root: %s\n", RootVisitor::RootName(root));
PrintF("-------------------------------------------------\n");
}
@@ -644,7 +622,7 @@ const char* Heap::GetSpaceName(int idx) {
return nullptr;
}
-void Heap::SetRootCodeStubs(NumberDictionary* value) {
+void Heap::SetRootCodeStubs(SimpleNumberDictionary* value) {
roots_[kCodeStubsRootIndex] = value;
}
@@ -1112,6 +1090,66 @@ void Heap::CollectAllGarbage(int flags, GarbageCollectionReason gc_reason,
set_current_gc_flags(kNoGCFlags);
}
+namespace {
+
+intptr_t CompareWords(int size, HeapObject* a, HeapObject* b) {
+ int words = size / kPointerSize;
+ DCHECK_EQ(a->Size(), size);
+ DCHECK_EQ(b->Size(), size);
+ intptr_t* slot_a = reinterpret_cast<intptr_t*>(a->address());
+ intptr_t* slot_b = reinterpret_cast<intptr_t*>(b->address());
+ for (int i = 0; i < words; i++) {
+ if (*slot_a != *slot_b) {
+ return *slot_a - *slot_b;
+ }
+ slot_a++;
+ slot_b++;
+ }
+ return 0;
+}
+
+void ReportDuplicates(int size, std::vector<HeapObject*>& objects) {
+ if (objects.size() == 0) return;
+
+ sort(objects.begin(), objects.end(), [size](HeapObject* a, HeapObject* b) {
+ intptr_t c = CompareWords(size, a, b);
+ if (c != 0) return c < 0;
+ return a < b;
+ });
+
+ std::vector<std::pair<int, HeapObject*>> duplicates;
+ HeapObject* current = objects[0];
+ int count = 1;
+ for (size_t i = 1; i < objects.size(); i++) {
+ if (CompareWords(size, current, objects[i]) == 0) {
+ count++;
+ } else {
+ if (count > 1) {
+ duplicates.push_back(std::make_pair(count - 1, current));
+ }
+ count = 1;
+ current = objects[i];
+ }
+ }
+ if (count > 1) {
+ duplicates.push_back(std::make_pair(count - 1, current));
+ }
+
+ int threshold = FLAG_trace_duplicate_threshold_kb * KB;
+
+ sort(duplicates.begin(), duplicates.end());
+ for (auto it = duplicates.rbegin(); it != duplicates.rend(); ++it) {
+ int duplicate_bytes = it->first * size;
+ if (duplicate_bytes < threshold) break;
+ PrintF("%d duplicates of size %d each (%dKB)\n", it->first, size,
+ duplicate_bytes / KB);
+ PrintF("Sample object: ");
+ it->second->Print();
+ PrintF("============================\n");
+ }
+}
+} // anonymous namespace
+
void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
@@ -1129,12 +1167,9 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
}
RuntimeCallTimerScope runtime_timer(
isolate(), RuntimeCallCounterId::kGC_Custom_AllAvailableGarbage);
- if (isolate()->concurrent_recompilation_enabled()) {
- // The optimizing compiler may be unnecessarily holding on to memory.
- DisallowHeapAllocation no_recursive_gc;
- isolate()->optimizing_compile_dispatcher()->Flush(
- OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
- }
+
+ // The optimizing compiler may be unnecessarily holding on to memory.
+ isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
isolate()->ClearSerializerData();
set_current_gc_flags(kMakeHeapIterableMask | kReduceMemoryFootprintMask);
isolate_->compilation_cache()->Clear();
@@ -1151,6 +1186,28 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
set_current_gc_flags(kNoGCFlags);
new_space_->Shrink();
UncommitFromSpace();
+
+ if (FLAG_trace_duplicate_threshold_kb) {
+ std::map<int, std::vector<HeapObject*>> objects_by_size;
+ PagedSpaces spaces(this);
+ for (PagedSpace* space = spaces.next(); space != nullptr;
+ space = spaces.next()) {
+ HeapObjectIterator it(space);
+ for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
+ objects_by_size[obj->Size()].push_back(obj);
+ }
+ }
+ {
+ LargeObjectIterator it(lo_space());
+ for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
+ objects_by_size[obj->Size()].push_back(obj);
+ }
+ }
+ for (auto it = objects_by_size.rbegin(); it != objects_by_size.rend();
+ ++it) {
+ ReportDuplicates(it->first, it->second);
+ }
+ }
}
void Heap::ReportExternalMemoryPressure() {
@@ -1316,11 +1373,8 @@ int Heap::NotifyContextDisposed(bool dependant_context) {
event.time_ms = MonotonicallyIncreasingTimeInMs();
memory_reducer_->NotifyPossibleGarbage(event);
}
- if (isolate()->concurrent_recompilation_enabled()) {
- // Flush the queued recompilation tasks.
- isolate()->optimizing_compile_dispatcher()->Flush(
- OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
- }
+ isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
+
number_of_disposed_maps_ = retained_maps()->Length();
tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs());
return ++contexts_disposed_;
@@ -1733,12 +1787,12 @@ void Heap::MarkCompact() {
void Heap::MinorMarkCompact() {
DCHECK(FLAG_minor_mc);
+ PauseAllocationObserversScope pause_observers(this);
SetGCState(MINOR_MARK_COMPACT);
LOG(isolate_, ResourceEvent("MinorMarkCompact", "begin"));
TRACE_GC(tracer(), GCTracer::Scope::MINOR_MC);
AlwaysAllocateScope always_allocate(isolate());
- PauseAllocationObserversScope pause_observers(this);
IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
incremental_marking());
CodeSpaceMemoryModificationScope code_modifcation(this);
@@ -1924,11 +1978,10 @@ int Heap::NumberOfScavengeTasks() {
if (!FLAG_parallel_scavenge) return 1;
const int num_scavenge_tasks =
static_cast<int>(new_space()->TotalCapacity()) / MB;
- return Max(
- 1,
- Min(Min(num_scavenge_tasks, kMaxScavengerTasks),
- static_cast<int>(
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads())));
+ static int num_cores =
+ 1 + static_cast<int>(
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads());
+ return Max(1, Min(Min(num_scavenge_tasks, kMaxScavengerTasks), num_cores));
}
void Heap::Scavenge() {
@@ -2015,7 +2068,7 @@ void Heap::Scavenge() {
{
// Parallel phase scavenging all copied and promoted objects.
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
- job.Run();
+ job.Run(isolate()->async_counters());
DCHECK(copied_list.IsGlobalEmpty());
DCHECK(promotion_list.IsGlobalEmpty());
}
@@ -2187,7 +2240,8 @@ void Heap::ExternalStringTable::PromoteAllNewSpaceStrings() {
void Heap::ExternalStringTable::IterateNewSpaceStrings(RootVisitor* v) {
if (!new_space_strings_.empty()) {
- v->VisitRootPointers(Root::kExternalStringsTable, new_space_strings_.data(),
+ v->VisitRootPointers(Root::kExternalStringsTable, nullptr,
+ new_space_strings_.data(),
new_space_strings_.data() + new_space_strings_.size());
}
}
@@ -2195,7 +2249,8 @@ void Heap::ExternalStringTable::IterateNewSpaceStrings(RootVisitor* v) {
void Heap::ExternalStringTable::IterateAll(RootVisitor* v) {
IterateNewSpaceStrings(v);
if (!old_space_strings_.empty()) {
- v->VisitRootPointers(Root::kExternalStringsTable, old_space_strings_.data(),
+ v->VisitRootPointers(Root::kExternalStringsTable, nullptr,
+ old_space_strings_.data(),
old_space_strings_.data() + old_space_strings_.size());
}
}
@@ -2301,7 +2356,8 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
explicit ExternalStringTableVisitorAdapter(
v8::ExternalResourceVisitor* visitor)
: visitor_(visitor) {}
- virtual void VisitRootPointers(Root root, Object** start, Object** end) {
+ virtual void VisitRootPointers(Root root, const char* description,
+ Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
DCHECK((*p)->IsExternalString());
visitor_->VisitExternalString(
@@ -2512,12 +2568,12 @@ AllocationResult Heap::AllocateHeapNumber(MutableMode mode,
return result;
}
-AllocationResult Heap::AllocateBigInt(int length) {
+AllocationResult Heap::AllocateBigInt(int length, PretenureFlag pretenure) {
if (length < 0 || length > BigInt::kMaxLength) {
v8::internal::Heap::FatalProcessOutOfMemory("invalid BigInt length", true);
}
int size = BigInt::SizeFor(length);
- AllocationSpace space = SelectSpace(NOT_TENURED);
+ AllocationSpace space = SelectSpace(pretenure);
HeapObject* result = nullptr;
{
AllocationResult allocation = AllocateRaw(size, space);
@@ -2541,6 +2597,20 @@ AllocationResult Heap::AllocateCell(Object* value) {
return result;
}
+AllocationResult Heap::AllocateFeedbackCell(Map* map, HeapObject* value) {
+ int size = FeedbackCell::kSize;
+ STATIC_ASSERT(FeedbackCell::kSize <= kMaxRegularHeapObjectSize);
+
+ HeapObject* result = nullptr;
+ {
+ AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
+ if (!allocation.To(&result)) return allocation;
+ }
+ result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
+ FeedbackCell::cast(result)->set_value(value);
+ return result;
+}
+
AllocationResult Heap::AllocatePropertyCell(Name* name) {
DCHECK(name->IsUniqueName());
int size = PropertyCell::kSize;
@@ -2849,11 +2919,11 @@ AllocationResult Heap::AllocateBytecodeArray(int length,
instance->set_parameter_count(parameter_count);
instance->set_incoming_new_target_or_generator_register(
interpreter::Register::invalid_value());
- instance->set_interrupt_budget(interpreter::Interpreter::kInterruptBudget);
+ instance->set_interrupt_budget(interpreter::Interpreter::InterruptBudget());
instance->set_osr_loop_nesting_level(0);
instance->set_bytecode_age(BytecodeArray::kNoAgeBytecodeAge);
instance->set_constant_pool(constant_pool);
- instance->set_handler_table(empty_fixed_array());
+ instance->set_handler_table(empty_byte_array());
instance->set_source_position_table(empty_byte_array());
CopyBytes(instance->GetFirstBytecodeAddress(), raw_bytecodes, length);
instance->clear_padding();
@@ -3145,10 +3215,10 @@ AllocationResult Heap::AllocateCode(int object_size, Movability movability) {
AllocationResult Heap::AllocateCode(
const CodeDesc& desc, Code::Kind kind, Handle<Object> self_ref,
int32_t builtin_index, ByteArray* reloc_info,
- CodeDataContainer* data_container, HandlerTable* handler_table,
- ByteArray* source_position_table, DeoptimizationData* deopt_data,
- Movability movability, uint32_t stub_key, bool is_turbofanned,
- int stack_slots, int safepoint_table_offset) {
+ CodeDataContainer* data_container, ByteArray* source_position_table,
+ DeoptimizationData* deopt_data, Movability movability, uint32_t stub_key,
+ bool is_turbofanned, int stack_slots, int safepoint_table_offset,
+ int handler_table_offset) {
bool has_unwinding_info = desc.unwinding_info != nullptr;
DCHECK((has_unwinding_info && desc.unwinding_info_size > 0) ||
(!has_unwinding_info && desc.unwinding_info_size == 0));
@@ -3174,11 +3244,11 @@ AllocationResult Heap::AllocateCode(
code->set_relocation_info(reloc_info);
code->initialize_flags(kind, has_unwinding_info, is_turbofanned, stack_slots);
code->set_safepoint_table_offset(safepoint_table_offset);
+ code->set_handler_table_offset(handler_table_offset);
code->set_code_data_container(data_container);
code->set_has_tagged_params(true);
code->set_deoptimization_data(deopt_data);
code->set_stub_key(stub_key);
- code->set_handler_table(handler_table);
code->set_source_position_table(source_position_table);
code->set_protected_instructions(empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_constant_pool_offset(desc.instr_size - desc.constant_pool_size);
@@ -3322,6 +3392,21 @@ AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
return result;
}
+AllocationResult Heap::AllocateJSPromise(JSFunction* constructor,
+ PretenureFlag pretenure) {
+ AllocationResult allocation = AllocateJSObject(constructor, pretenure);
+ JSPromise* promise = nullptr;
+ if (!allocation.To(&promise)) return allocation;
+
+ // Setup JSPromise fields
+ promise->set_reactions_or_result(Smi::kZero);
+ promise->set_flags(0);
+ for (int i = 0; i < v8::Promise::kEmbedderFieldCount; i++) {
+ promise->SetEmbedderField(i, Smi::kZero);
+ }
+ return promise;
+}
+
void Heap::InitializeJSObjectFromMap(JSObject* obj, Object* properties,
Map* map) {
obj->set_raw_properties_or_hash(properties);
@@ -3503,28 +3588,17 @@ static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
int len) {
- const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
- size_t stream_length = vector.length();
- while (stream_length != 0) {
- size_t consumed = 0;
- uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
+ unibrow::Utf8Iterator it = unibrow::Utf8Iterator(vector);
+ while (!it.Done()) {
+ DCHECK_GT(len, 0);
+ len -= 1;
+
+ uint16_t c = *it;
+ ++it;
DCHECK_NE(unibrow::Utf8::kBadChar, c);
- DCHECK(consumed <= stream_length);
- stream_length -= consumed;
- stream += consumed;
- if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
- len -= 2;
- if (len < 0) break;
- *chars++ = unibrow::Utf16::LeadSurrogate(c);
- *chars++ = unibrow::Utf16::TrailSurrogate(c);
- } else {
- len -= 1;
- if (len < 0) break;
- *chars++ = c;
- }
+ *chars++ = c;
}
- DCHECK_EQ(0, stream_length);
- DCHECK_EQ(0, len);
+ DCHECK_EQ(len, 0);
}
@@ -4447,12 +4521,8 @@ class MemoryPressureInterruptTask : public CancelableTask {
void Heap::CheckMemoryPressure() {
if (HighMemoryPressure()) {
- if (isolate()->concurrent_recompilation_enabled()) {
- // The optimizing compiler may be unnecessarily holding on to memory.
- DisallowHeapAllocation no_recursive_gc;
- isolate()->optimizing_compile_dispatcher()->Flush(
- OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
- }
+ // The optimizing compiler may be unnecessarily holding on to memory.
+ isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
}
if (memory_pressure_level_.Value() == MemoryPressureLevel::kCritical) {
CollectGarbageOnMemoryPressure();
@@ -4877,8 +4947,9 @@ void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
mode == VISIT_ALL_IN_MINOR_MC_MARK ||
mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
- v->VisitRootPointer(Root::kStringTable, reinterpret_cast<Object**>(
- &roots_[kStringTableRootIndex]));
+ v->VisitRootPointer(
+ Root::kStringTable, nullptr,
+ reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
v->Synchronize(VisitorSynchronization::kStringTable);
if (!isMinorGC && mode != VISIT_ALL_IN_SWEEP_NEWSPACE &&
mode != VISIT_FOR_SERIALIZATION) {
@@ -4893,13 +4964,13 @@ void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
void Heap::IterateSmiRoots(RootVisitor* v) {
// Acquire execution access since we are going to read stack limit values.
ExecutionAccess access(isolate());
- v->VisitRootPointers(Root::kSmiRootList, &roots_[kSmiRootsStart],
+ v->VisitRootPointers(Root::kSmiRootList, nullptr, &roots_[kSmiRootsStart],
&roots_[kRootListLength]);
v->Synchronize(VisitorSynchronization::kSmiRootList);
}
void Heap::IterateEncounteredWeakCollections(RootVisitor* visitor) {
- visitor->VisitRootPointer(Root::kWeakCollections,
+ visitor->VisitRootPointer(Root::kWeakCollections, nullptr,
&encountered_weak_collections_);
}
@@ -4913,9 +4984,13 @@ class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
USE(heap_);
}
- void VisitRootPointer(Root root, Object** p) override { FixHandle(p); }
+ void VisitRootPointer(Root root, const char* description,
+ Object** p) override {
+ FixHandle(p);
+ }
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
for (Object** p = start; p < end; p++) FixHandle(p);
}
@@ -4951,7 +5026,7 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
mode == VISIT_ALL_IN_MINOR_MC_MARK ||
mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
- v->VisitRootPointers(Root::kStrongRootList, &roots_[0],
+ v->VisitRootPointers(Root::kStrongRootList, nullptr, &roots_[0],
&roots_[kStrongRootListLength]);
v->Synchronize(VisitorSynchronization::kStrongRootList);
@@ -5026,7 +5101,7 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
// Iterate over other strong roots (currently only identity maps).
for (StrongRootsList* list = strong_roots_list_; list; list = list->next) {
- v->VisitRootPointers(Root::kStrongRoots, list->start, list->end);
+ v->VisitRootPointers(Root::kStrongRoots, nullptr, list->start, list->end);
}
v->Synchronize(VisitorSynchronization::kStrongRoots);
@@ -5038,6 +5113,9 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
}
}
+void Heap::IterateWeakGlobalHandles(RootVisitor* v) {
+ isolate_->global_handles()->IterateWeakRoots(v);
+}
// TODO(1236194): Since the heap size is configurable on the command line
// and through the API, we should gracefully handle the case that the heap
@@ -5745,7 +5823,8 @@ void Heap::RegisterExternallyReferencedObject(Object** object) {
}
void Heap::TearDown() {
- use_tasks_ = false;
+ SetGCState(TEAR_DOWN);
+ DCHECK(!use_tasks_);
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
@@ -6035,7 +6114,8 @@ void Heap::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
class PrintHandleVisitor : public RootVisitor {
public:
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
for (Object** p = start; p < end; p++)
PrintF(" handle %p to %p\n", reinterpret_cast<void*>(p),
reinterpret_cast<void*>(*p));
@@ -6057,7 +6137,8 @@ class CheckHandleCountVisitor : public RootVisitor {
~CheckHandleCountVisitor() override {
CHECK_GT(HandleScope::kCheckHandleThreshold, handle_count_);
}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
handle_count_ += end - start;
}
@@ -6207,7 +6288,8 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
MarkPointers(start, end);
}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
MarkPointers(start, end);
}
@@ -6449,6 +6531,10 @@ void Heap::SetDeserializeLazyHandlerExtraWide(Code* code) {
set_deserialize_lazy_handler_extra_wide(code);
}
+void Heap::SetBuiltinsConstantsTable(FixedArray* cache) {
+ set_builtins_constants_table(cache);
+}
+
size_t Heap::NumberOfTrackedHeapObjectTypes() {
return ObjectStats::OBJECT_STATS_COUNT;
}
@@ -6480,19 +6566,13 @@ bool Heap::GetObjectTypeName(size_t index, const char** object_type,
return true;
INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
-#define COMPARE_AND_RETURN_NAME(name) \
- case ObjectStats::FIRST_CODE_KIND_SUB_TYPE + Code::name: \
- *object_type = "CODE_TYPE"; \
- *object_sub_type = "CODE_KIND/" #name; \
- return true;
- CODE_KIND_LIST(COMPARE_AND_RETURN_NAME)
-#undef COMPARE_AND_RETURN_NAME
-#define COMPARE_AND_RETURN_NAME(name) \
- case ObjectStats::FIRST_FIXED_ARRAY_SUB_TYPE + name: \
- *object_type = "FIXED_ARRAY_TYPE"; \
- *object_sub_type = #name; \
+
+#define COMPARE_AND_RETURN_NAME(name) \
+ case ObjectStats::FIRST_VIRTUAL_TYPE + ObjectStats::name: \
+ *object_type = #name; \
+ *object_sub_type = ""; \
return true;
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
+ VIRTUAL_INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
}
return false;
@@ -6537,8 +6617,9 @@ void VerifyPointersVisitor::VisitPointers(HeapObject* host, Object** start,
VerifyPointers(start, end);
}
-void VerifyPointersVisitor::VisitRootPointers(Root root, Object** start,
- Object** end) {
+void VerifyPointersVisitor::VisitRootPointers(Root root,
+ const char* description,
+ Object** start, Object** end) {
VerifyPointers(start, end);
}
@@ -6554,8 +6635,8 @@ void VerifyPointersVisitor::VerifyPointers(Object** start, Object** end) {
}
}
-void VerifySmisVisitor::VisitRootPointers(Root root, Object** start,
- Object** end) {
+void VerifySmisVisitor::VisitRootPointers(Root root, const char* description,
+ Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
CHECK((*current)->IsSmi());
}
@@ -6580,12 +6661,11 @@ bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
AllocationSpace src = chunk->owner()->identity();
switch (src) {
case NEW_SPACE:
- return dst == src || dst == OLD_SPACE;
+ return dst == NEW_SPACE || dst == OLD_SPACE;
case OLD_SPACE:
- return dst == src &&
- (dst == OLD_SPACE || obj->IsFiller() || obj->IsExternalString());
+ return dst == OLD_SPACE;
case CODE_SPACE:
- return dst == src && type == CODE_TYPE;
+ return dst == CODE_SPACE && type == CODE_TYPE;
case MAP_SPACE:
case LO_SPACE:
return false;
@@ -6612,6 +6692,7 @@ void AllocationObserver::AllocationStep(int bytes_allocated,
step_size_ = GetNextStepSize();
bytes_to_next_step_ = step_size_;
}
+ DCHECK_GE(bytes_to_next_step_, 0);
}
namespace {
@@ -6638,12 +6719,24 @@ Code* GcSafeCastToCode(Heap* heap, HeapObject* object, Address inner_pointer) {
bool Heap::GcSafeCodeContains(HeapObject* code, Address addr) {
Map* map = GcSafeMapOfCodeSpaceObject(code);
DCHECK(map == code->GetHeap()->code_map());
+#ifdef V8_EMBEDDED_BUILTINS
+ if (FLAG_stress_off_heap_code) {
+ if (InstructionStream::TryLookupCode(isolate(), addr) == code) return true;
+ }
+#endif
Address start = code->address();
Address end = code->address() + code->SizeFromMap(map);
return start <= addr && addr < end;
}
Code* Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
+#ifdef V8_EMBEDDED_BUILTINS
+ if (FLAG_stress_off_heap_code) {
+ Code* code = InstructionStream::TryLookupCode(isolate(), inner_pointer);
+ if (code != nullptr) return code;
+ }
+#endif
+
// Check if the inner pointer points into a large object chunk.
LargePage* large_page = lo_space()->FindPage(inner_pointer);
if (large_page != nullptr) {
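Among the heap.cc changes above, the new --trace_duplicate_threshold_kb path (CompareWords/ReportDuplicates) works by bucketing objects by size, sorting each bucket by raw word content, counting runs of identical neighbours, and printing the groups whose wasted bytes exceed the threshold, largest first. The same sort-then-count-runs idea in a self-contained form, operating on strings instead of HeapObject words (purely illustrative, not V8 code):

  #include <algorithm>
  #include <cstddef>
  #include <cstdio>
  #include <string>
  #include <utility>
  #include <vector>

  // Sort, then walk the sorted range counting runs of equal elements,
  // keeping one (wasted bytes, sample) entry per duplicated value.
  void ReportDuplicateStrings(std::vector<std::string> objects,
                              std::size_t threshold_bytes) {
    if (objects.empty()) return;
    std::sort(objects.begin(), objects.end());

    std::vector<std::pair<std::size_t, std::string>> duplicates;
    std::string current = objects[0];
    int count = 1;
    for (std::size_t i = 1; i < objects.size(); i++) {
      if (objects[i] == current) {
        count++;
      } else {
        if (count > 1) duplicates.emplace_back((count - 1) * current.size(), current);
        count = 1;
        current = objects[i];
      }
    }
    if (count > 1) duplicates.emplace_back((count - 1) * current.size(), current);

    std::sort(duplicates.rbegin(), duplicates.rend());  // largest waste first
    for (const auto& entry : duplicates) {
      if (entry.first < threshold_bytes) break;
      std::printf("\"%s\": %zu wasted bytes\n", entry.second.c_str(), entry.first);
    }
  }

  int main() {
    // Three identical "map"s waste 6 bytes, two identical "code"s waste 4.
    ReportDuplicateStrings({"map", "map", "map", "code", "code", "string"}, 1);
    return 0;
  }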
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 7cc65479ca..63bcfb2990 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -114,6 +114,7 @@ using v8::MemoryPressureLevel;
V(Map, name_dictionary_map, NameDictionaryMap) \
V(Map, global_dictionary_map, GlobalDictionaryMap) \
V(Map, number_dictionary_map, NumberDictionaryMap) \
+ V(Map, simple_number_dictionary_map, SimpleNumberDictionaryMap) \
V(Map, string_table_map, StringTableMap) \
V(Map, weak_hash_table_map, WeakHashTableMap) \
V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \
@@ -168,6 +169,8 @@ using v8::MemoryPressureLevel;
V(Map, fixed_float32_array_map, FixedFloat32ArrayMap) \
V(Map, fixed_float64_array_map, FixedFloat64ArrayMap) \
V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap) \
+ V(Map, fixed_biguint64_array_map, FixedBigUint64ArrayMap) \
+ V(Map, fixed_bigint64_array_map, FixedBigInt64ArrayMap) \
/* Oddball maps */ \
V(Map, undefined_map, UndefinedMap) \
V(Map, the_hole_map, TheHoleMap) \
@@ -193,8 +196,11 @@ using v8::MemoryPressureLevel;
V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array) \
V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array, \
EmptyFixedUint8ClampedArray) \
+ V(FixedTypedArrayBase, empty_fixed_biguint64_array, \
+ EmptyFixedBigUint64Array) \
+ V(FixedTypedArrayBase, empty_fixed_bigint64_array, EmptyFixedBigInt64Array) \
V(Script, empty_script, EmptyScript) \
- V(Cell, undefined_cell, UndefinedCell) \
+ V(FeedbackCell, many_closures_cell, ManyClosuresCell) \
V(FixedArray, empty_sloppy_arguments_elements, EmptySloppyArgumentsElements) \
V(NumberDictionary, empty_slow_element_dictionary, \
EmptySlowElementDictionary) \
@@ -213,6 +219,8 @@ using v8::MemoryPressureLevel;
V(PropertyCell, array_iterator_protector, ArrayIteratorProtector) \
V(PropertyCell, array_buffer_neutering_protector, \
ArrayBufferNeuteringProtector) \
+ V(PropertyCell, promise_hook_protector, PromiseHookProtector) \
+ V(PropertyCell, promise_then_protector, PromiseThenProtector) \
/* Special numbers */ \
V(HeapNumber, nan_value, NanValue) \
V(HeapNumber, hole_nan_value, HoleNanValue) \
@@ -230,7 +238,7 @@ using v8::MemoryPressureLevel;
V(NameDictionary, api_symbol_table, ApiSymbolTable) \
V(NameDictionary, api_private_symbol_table, ApiPrivateSymbolTable) \
V(Object, script_list, ScriptList) \
- V(NumberDictionary, code_stubs, CodeStubs) \
+ V(SimpleNumberDictionary, code_stubs, CodeStubs) \
V(FixedArray, materialized_objects, MaterializedObjects) \
V(FixedArray, microtask_queue, MicrotaskQueue) \
V(FixedArray, detached_contexts, DetachedContexts) \
@@ -242,6 +250,8 @@ using v8::MemoryPressureLevel;
/* slots refer to the code with the reference to the weak object. */ \
V(ArrayList, weak_new_space_object_to_code_list, \
WeakNewSpaceObjectToCodeList) \
+ /* Indirection lists for isolate-independent builtins */ \
+ V(FixedArray, builtins_constants_table, BuiltinsConstantsTable) \
/* Feedback vectors that we need for code coverage or type profile */ \
V(Object, feedback_vectors_for_profiling_tools, \
FeedbackVectorsForProfilingTools) \
@@ -340,6 +350,7 @@ using v8::MemoryPressureLevel;
V(JsConstructEntryCode) \
V(JsEntryCode) \
V(JSMessageObjectMap) \
+ V(ManyClosuresCell) \
V(ManyClosuresCellMap) \
V(MetaMap) \
V(MinusInfinityValue) \
@@ -363,6 +374,7 @@ using v8::MemoryPressureLevel;
V(ScopeInfoMap) \
V(ScriptContextMap) \
V(SharedFunctionInfoMap) \
+ V(SimpleNumberDictionaryMap) \
V(SloppyArgumentsElementsMap) \
V(SmallOrderedHashMapMap) \
V(SmallOrderedHashSetMap) \
@@ -377,7 +389,6 @@ using v8::MemoryPressureLevel;
V(TransitionArrayMap) \
V(TrueValue) \
V(TwoPointerFillerMap) \
- V(UndefinedCell) \
V(UndefinedMap) \
V(UndefinedValue) \
V(UninitializedMap) \
@@ -575,7 +586,13 @@ class Heap {
enum FindMementoMode { kForRuntime, kForGC };
- enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT, MINOR_MARK_COMPACT };
+ enum HeapState {
+ NOT_IN_GC,
+ SCAVENGE,
+ MARK_COMPACT,
+ MINOR_MARK_COMPACT,
+ TEAR_DOWN
+ };
using PretenuringFeedbackMap = std::unordered_map<AllocationSite*, size_t>;
@@ -966,6 +983,8 @@ class Heap {
// Returns whether SetUp has been called.
bool HasBeenSetUp();
+ void stop_using_tasks() { use_tasks_ = false; }
+
bool use_tasks() const { return use_tasks_; }
// ===========================================================================
@@ -1062,7 +1081,7 @@ class Heap {
Object** roots_array_start() { return roots_; }
// Sets the stub_cache_ (only used when expanding the dictionary).
- void SetRootCodeStubs(NumberDictionary* value);
+ void SetRootCodeStubs(SimpleNumberDictionary* value);
void SetRootMaterializedObjects(FixedArray* objects) {
roots_[kMaterializedObjectsRootIndex] = objects;
@@ -1110,6 +1129,8 @@ class Heap {
void SetDeserializeLazyHandlerWide(Code* code);
void SetDeserializeLazyHandlerExtraWide(Code* code);
+ void SetBuiltinsConstantsTable(FixedArray* cache);
+
// ===========================================================================
// Inline allocation. ========================================================
// ===========================================================================
@@ -1161,15 +1182,15 @@ class Heap {
// Iterators. ================================================================
// ===========================================================================
- // Iterates over all roots in the heap.
void IterateRoots(RootVisitor* v, VisitMode mode);
- // Iterates over all strong roots in the heap.
void IterateStrongRoots(RootVisitor* v, VisitMode mode);
// Iterates over entries in the smi roots list. Only interesting to the
// serializer/deserializer, since GC does not care about smis.
void IterateSmiRoots(RootVisitor* v);
- // Iterates over all the other roots in the heap.
+ // Iterates over weak string tables.
void IterateWeakRoots(RootVisitor* v, VisitMode mode);
+ // Iterates over weak global handles.
+ void IterateWeakGlobalHandles(RootVisitor* v);
// ===========================================================================
// Store buffer API. =========================================================
@@ -1571,6 +1592,11 @@ class Heap {
void RemoveAllocationObserversFromAllSpaces(
AllocationObserver* observer, AllocationObserver* new_space_observer);
+ bool allocation_step_in_progress() { return allocation_step_in_progress_; }
+ void set_allocation_step_in_progress(bool val) {
+ allocation_step_in_progress_ = val;
+ }
+
// ===========================================================================
// Retaining path tracking. ==================================================
// ===========================================================================
@@ -2076,7 +2102,8 @@ class Heap {
MUST_USE_RESULT AllocationResult AllocateHeapNumber(
MutableMode mode = IMMUTABLE, PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT AllocationResult AllocateBigInt(int length);
+ MUST_USE_RESULT AllocationResult
+ AllocateBigInt(int length, PretenureFlag pretenure = NOT_TENURED);
// Allocates a byte array of the specified length
MUST_USE_RESULT AllocationResult
@@ -2265,6 +2292,10 @@ class Heap {
// Allocate a tenured simple cell.
MUST_USE_RESULT AllocationResult AllocateCell(Object* value);
+ // Allocate a tenured simple feedback cell.
+ MUST_USE_RESULT AllocationResult AllocateFeedbackCell(Map* map,
+ HeapObject* value);
+
// Allocate a tenured JS global property cell initialized with the hole.
MUST_USE_RESULT AllocationResult AllocatePropertyCell(Name* name);
@@ -2287,13 +2318,16 @@ class Heap {
// Allocates a new code object (fully initialized). All header fields of the
// returned object are immutable and the code object is write protected.
- MUST_USE_RESULT AllocationResult
- AllocateCode(const CodeDesc& desc, Code::Kind kind, Handle<Object> self_ref,
- int32_t builtin_index, ByteArray* reloc_info,
- CodeDataContainer* data_container, HandlerTable* handler_table,
- ByteArray* source_position_table, DeoptimizationData* deopt_data,
- Movability movability, uint32_t stub_key, bool is_turbofanned,
- int stack_slots, int safepoint_table_offset);
+ MUST_USE_RESULT AllocationResult AllocateCode(
+ const CodeDesc& desc, Code::Kind kind, Handle<Object> self_ref,
+ int32_t builtin_index, ByteArray* reloc_info,
+ CodeDataContainer* data_container, ByteArray* source_position_table,
+ DeoptimizationData* deopt_data, Movability movability, uint32_t stub_key,
+ bool is_turbofanned, int stack_slots, int safepoint_table_offset,
+ int handler_table_offset);
+
+ MUST_USE_RESULT AllocationResult AllocateJSPromise(
+ JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED);
void set_force_oom(bool value) { force_oom_ = value; }
@@ -2400,6 +2434,8 @@ class Heap {
// Observer that can cause early scavenge start.
StressScavengeObserver* stress_scavenge_observer_;
+ bool allocation_step_in_progress_;
+
  // The maximum percent of the marking limit reached without causing marking.
  // This is tracked when specifying --fuzzer-gc-analysis.
double max_marking_limit_reached_;
@@ -2658,6 +2694,7 @@ class AlwaysAllocateScope {
Heap* heap_;
};
+// The CodeSpaceMemoryModificationScope can only be used by the main thread.
class CodeSpaceMemoryModificationScope {
public:
explicit inline CodeSpaceMemoryModificationScope(Heap* heap);
@@ -2667,6 +2704,9 @@ class CodeSpaceMemoryModificationScope {
Heap* heap_;
};
+// The CodePageMemoryModificationScope does not check if transitions to
+// writeable and back to executable are actually allowed, i.e. the MemoryChunk
+// was registered to be executable. It can be used by concurrent threads.
class CodePageMemoryModificationScope {
public:
explicit inline CodePageMemoryModificationScope(MemoryChunk* chunk);
@@ -2689,7 +2729,8 @@ class CodePageMemoryModificationScope {
class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
public:
void VisitPointers(HeapObject* host, Object** start, Object** end) override;
- void VisitRootPointers(Root root, Object** start, Object** end) override;
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override;
private:
void VerifyPointers(Object** start, Object** end);
@@ -2699,7 +2740,8 @@ class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
// Verify that all objects are Smis.
class VerifySmisVisitor : public RootVisitor {
public:
- void VisitRootPointers(Root root, Object** start, Object** end) override;
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override;
};
// Space iterator for iterating over all the paged spaces of the heap: Map
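The two scope classes above differ in granularity and threading. A minimal, hypothetical sketch of how call sites might use them, assuming only the two constructors declared in this header (the surrounding functions are illustrative, not part of the patch):

// Hypothetical call sites sketched against the constructors declared above.
// CodeSpaceMemoryModificationScope: main thread only; covers the whole code
// space for the lifetime of the scope.
void PatchCodeOnMainThread(Heap* heap) {
  CodeSpaceMemoryModificationScope modification_scope(heap);
  // ... mutate code objects anywhere in the code space ...
}

// CodePageMemoryModificationScope: per MemoryChunk, usable from concurrent
// threads; it does not verify that the chunk was registered as executable.
void PatchSinglePage(MemoryChunk* chunk) {
  CodePageMemoryModificationScope modification_scope(chunk);
  // ... mutate objects on |chunk| only ...
}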
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 4868adc26e..a7b56e4315 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -203,11 +203,13 @@ class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
IncrementalMarking* incremental_marking)
: heap_(incremental_marking->heap()) {}
- void VisitRootPointer(Root root, Object** p) override {
+ void VisitRootPointer(Root root, const char* description,
+ Object** p) override {
MarkObjectByPointer(p);
}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
}
@@ -653,15 +655,17 @@ bool IncrementalMarking::IsFixedArrayWithProgressBar(HeapObject* obj) {
int IncrementalMarking::VisitObject(Map* map, HeapObject* obj) {
DCHECK(marking_state()->IsGrey(obj) || marking_state()->IsBlack(obj));
- // The object can already be black in two cases:
- // 1. The object is a fixed array with the progress bar.
- // 2. The object is a JSObject that was colored black before
- // unsafe layout change.
- // 3. The object is a string that was colored black before
- // unsafe layout change.
if (!marking_state()->GreyToBlack(obj)) {
- DCHECK(IsFixedArrayWithProgressBar(obj) || obj->IsJSObject() ||
- obj->IsString());
+ // The object can already be black in these cases:
+ // 1. The object is a fixed array with the progress bar.
+ // 2. The object is a JSObject that was colored black before
+ // unsafe layout change.
+ // 3. The object is a string that was colored black before
+ // unsafe layout change.
+    // 4. The object is materialized by the deoptimizer.
+ DCHECK(obj->IsHashTable() || obj->IsPropertyArray() ||
+ obj->IsContextExtension() || obj->IsFixedArray() ||
+ obj->IsJSObject() || obj->IsString());
}
DCHECK(marking_state()->IsBlack(obj));
WhiteToGreyAndPush(map);
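The signature change applied here, threading a |description| string through VisitRootPointer and VisitRootPointers, recurs across every root visitor touched by this patch. A minimal, hypothetical visitor written against the new interface (assuming RootVisitor and Root come from src/visitors.h):

// Hypothetical visitor illustrating the new RootVisitor signatures, which now
// receive a human-readable |description| for each root set in addition to the
// Root enum value.
class CountingRootVisitor final : public RootVisitor {
 public:
  void VisitRootPointer(Root root, const char* description,
                        Object** p) override {
    count_++;
  }
  void VisitRootPointers(Root root, const char* description, Object** start,
                         Object** end) override {
    count_ += static_cast<size_t>(end - start);
  }
  size_t count() const { return count_; }

 private:
  size_t count_ = 0;
};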
diff --git a/deps/v8/src/heap/invalidated-slots-inl.h b/deps/v8/src/heap/invalidated-slots-inl.h
index b62aa93cde..8ca289cf1a 100644
--- a/deps/v8/src/heap/invalidated-slots-inl.h
+++ b/deps/v8/src/heap/invalidated-slots-inl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INVALIDATED_SLOTS_INL_H
-#define V8_INVALIDATED_SLOTS_INL_H
+#ifndef V8_HEAP_INVALIDATED_SLOTS_INL_H_
+#define V8_HEAP_INVALIDATED_SLOTS_INL_H_
#include <map>
@@ -67,4 +67,4 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
} // namespace internal
} // namespace v8
-#endif // V8_INVALIDATED_SLOTS_INL_H
+#endif // V8_HEAP_INVALIDATED_SLOTS_INL_H_
diff --git a/deps/v8/src/heap/invalidated-slots.h b/deps/v8/src/heap/invalidated-slots.h
index 78ac03bc79..e9410575a3 100644
--- a/deps/v8/src/heap/invalidated-slots.h
+++ b/deps/v8/src/heap/invalidated-slots.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INVALIDATED_SLOTS_H
-#define V8_INVALIDATED_SLOTS_H
+#ifndef V8_HEAP_INVALIDATED_SLOTS_H_
+#define V8_HEAP_INVALIDATED_SLOTS_H_
#include <map>
#include <stack>
@@ -51,4 +51,4 @@ class InvalidatedSlotsFilter {
} // namespace internal
} // namespace v8
-#endif // V8_INVALIDATED_SLOTS_H
+#endif // V8_HEAP_INVALIDATED_SLOTS_H_
diff --git a/deps/v8/src/heap/item-parallel-job.cc b/deps/v8/src/heap/item-parallel-job.cc
new file mode 100644
index 0000000000..1c8d4c8ac4
--- /dev/null
+++ b/deps/v8/src/heap/item-parallel-job.cc
@@ -0,0 +1,130 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/item-parallel-job.h"
+
+#include "src/base/platform/semaphore.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+ItemParallelJob::Task::Task(Isolate* isolate) : CancelableTask(isolate) {}
+
+ItemParallelJob::Task::~Task() {
+ // The histogram is reset in RunInternal(). If it's still around it means
+ // this task was cancelled before being scheduled.
+ if (gc_parallel_task_latency_histogram_)
+ gc_parallel_task_latency_histogram_->RecordAbandon();
+}
+
+void ItemParallelJob::Task::SetupInternal(
+ base::Semaphore* on_finish, std::vector<Item*>* items, size_t start_index,
+ base::Optional<AsyncTimedHistogram> gc_parallel_task_latency_histogram) {
+ on_finish_ = on_finish;
+ items_ = items;
+
+ if (start_index < items->size()) {
+ cur_index_ = start_index;
+ } else {
+ items_considered_ = items_->size();
+ }
+
+ gc_parallel_task_latency_histogram_ =
+ std::move(gc_parallel_task_latency_histogram);
+}
+
+void ItemParallelJob::Task::RunInternal() {
+ if (gc_parallel_task_latency_histogram_) {
+ gc_parallel_task_latency_histogram_->RecordDone();
+ gc_parallel_task_latency_histogram_.reset();
+ }
+
+ RunInParallel();
+ on_finish_->Signal();
+}
+
+ItemParallelJob::ItemParallelJob(CancelableTaskManager* cancelable_task_manager,
+ base::Semaphore* pending_tasks)
+ : cancelable_task_manager_(cancelable_task_manager),
+ pending_tasks_(pending_tasks) {}
+
+ItemParallelJob::~ItemParallelJob() {
+ for (size_t i = 0; i < items_.size(); i++) {
+ Item* item = items_[i];
+ CHECK(item->IsFinished());
+ delete item;
+ }
+}
+
+void ItemParallelJob::Run(std::shared_ptr<Counters> async_counters) {
+ DCHECK_GT(tasks_.size(), 0);
+ const size_t num_items = items_.size();
+ const size_t num_tasks = tasks_.size();
+
+ TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "ItemParallelJob::Run", TRACE_EVENT_SCOPE_THREAD,
+ "num_tasks", static_cast<int>(num_tasks), "num_items",
+ static_cast<int>(num_items));
+
+ AsyncTimedHistogram gc_parallel_task_latency_histogram(
+ async_counters->gc_parallel_task_latency(), async_counters);
+
+ // Some jobs have more tasks than items (when the items are mere coarse
+ // grain tasks that generate work dynamically for a second phase which all
+ // tasks participate in). Some jobs even have 0 items to preprocess but
+ // still have multiple tasks.
+ // TODO(gab): Figure out a cleaner scheme for this.
+ const size_t num_tasks_processing_items = Min(num_items, tasks_.size());
+
+ // In the event of an uneven workload, distribute an extra item to the first
+ // |items_remainder| tasks.
+ const size_t items_remainder = num_tasks_processing_items > 0
+ ? num_items % num_tasks_processing_items
+ : 0;
+ // Base |items_per_task|, will be bumped by 1 for the first
+ // |items_remainder| tasks.
+ const size_t items_per_task = num_tasks_processing_items > 0
+ ? num_items / num_tasks_processing_items
+ : 0;
+ CancelableTaskManager::Id* task_ids =
+ new CancelableTaskManager::Id[num_tasks];
+ Task* main_task = nullptr;
+ for (size_t i = 0, start_index = 0; i < num_tasks;
+ i++, start_index += items_per_task + (i < items_remainder ? 1 : 0)) {
+ Task* task = tasks_[i];
+
+    // By definition there are fewer |items_remainder| to distribute than
+    // there are tasks processing items, so this cannot overflow while we are
+    // assigning work items.
+ DCHECK_IMPLIES(start_index >= num_items, i >= num_tasks_processing_items);
+
+ task->SetupInternal(pending_tasks_, &items_, start_index,
+ i > 0 ? gc_parallel_task_latency_histogram
+ : base::Optional<AsyncTimedHistogram>());
+ task_ids[i] = task->id();
+ if (i > 0) {
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ task, v8::Platform::kShortRunningTask);
+ } else {
+ main_task = task;
+ }
+ }
+
+ // Contribute on main thread.
+ main_task->Run();
+ delete main_task;
+
+ // Wait for background tasks.
+ for (size_t i = 0; i < num_tasks; i++) {
+ if (cancelable_task_manager_->TryAbort(task_ids[i]) !=
+ CancelableTaskManager::kTaskAborted) {
+ pending_tasks_->Wait();
+ }
+ }
+ delete[] task_ids;
+}
+
+} // namespace internal
+} // namespace v8
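As a worked example of the distribution above (illustrative numbers only): with num_items = 10 and four tasks, items_per_task = 10 / 4 = 2 and items_remainder = 10 % 4 = 2, and the remainder is folded into the start-index increments of the earliest tasks. The start index is only a hint: items are claimed atomically through the Item state machine (kAvailable to kProcessing) and must be marked finished, so an uneven initial split does not affect correctness. The latency histogram follows a simple lifecycle: it is moved into every non-main task in SetupInternal(), RecordDone() fires when the task actually starts running, and RecordAbandon() fires from the destructor if the task was cancelled before being scheduled.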
diff --git a/deps/v8/src/heap/item-parallel-job.h b/deps/v8/src/heap/item-parallel-job.h
index 23c709f87b..4c21f69ca9 100644
--- a/deps/v8/src/heap/item-parallel-job.h
+++ b/deps/v8/src/heap/item-parallel-job.h
@@ -2,18 +2,29 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HEAP_ITEM_PARALLEL_JOB_
-#define V8_HEAP_ITEM_PARALLEL_JOB_
+#ifndef V8_HEAP_ITEM_PARALLEL_JOB_H_
+#define V8_HEAP_ITEM_PARALLEL_JOB_H_
+#include <memory>
#include <vector>
-#include "src/base/platform/semaphore.h"
+#include "src/base/atomic-utils.h"
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#include "src/base/optional.h"
#include "src/cancelable-task.h"
-#include "src/v8.h"
+#include "src/counters.h"
+#include "src/globals.h"
namespace v8 {
+
+namespace base {
+class Semaphore;
+}
+
namespace internal {
+class Counters;
class Isolate;
// This class manages background tasks that process a set of items in parallel.
@@ -25,14 +36,17 @@ class Isolate;
//
// Items need to be marked as finished after processing them. Task and Item
// ownership is transferred to the job.
-class ItemParallelJob {
+//
+// Each parallel (non-main thread) task will report the time between the job
+// being created and it being scheduled to |gc_parallel_task_latency_histogram|.
+class V8_EXPORT_PRIVATE ItemParallelJob {
public:
class Task;
- class Item {
+ class V8_EXPORT_PRIVATE Item {
public:
- Item() : state_(kAvailable) {}
- virtual ~Item() {}
+ Item() = default;
+ virtual ~Item() = default;
// Marks an item as being finished.
void MarkFinished() { CHECK(state_.TrySetValue(kProcessing, kFinished)); }
@@ -45,7 +59,7 @@ class ItemParallelJob {
}
bool IsFinished() { return state_.Value() == kFinished; }
- base::AtomicValue<ProcessingState> state_;
+ base::AtomicValue<ProcessingState> state_{kAvailable};
friend class ItemParallelJob;
friend class ItemParallelJob::Task;
@@ -53,15 +67,10 @@ class ItemParallelJob {
DISALLOW_COPY_AND_ASSIGN(Item);
};
- class Task : public CancelableTask {
+ class V8_EXPORT_PRIVATE Task : public CancelableTask {
public:
- explicit Task(Isolate* isolate)
- : CancelableTask(isolate),
- items_(nullptr),
- cur_index_(0),
- items_considered_(0),
- on_finish_(nullptr) {}
- virtual ~Task() {}
+ explicit Task(Isolate* isolate);
+ virtual ~Task();
virtual void RunInParallel() = 0;
@@ -85,42 +94,36 @@ class ItemParallelJob {
}
private:
- void SetupInternal(base::Semaphore* on_finish, std::vector<Item*>* items,
- size_t start_index) {
- on_finish_ = on_finish;
- items_ = items;
- cur_index_ = start_index;
- }
+ friend class ItemParallelJob;
+ friend class Item;
- // We don't allow overriding this method any further.
- void RunInternal() final {
- RunInParallel();
- on_finish_->Signal();
- }
+ // Sets up state required before invoking Run(). If
+    // |start_index| is >= |items_.size()|, this task will not process work items
+ // (some jobs have more tasks than work items in order to parallelize post-
+ // processing, e.g. scavenging). If |gc_parallel_task_latency_histogram| is
+ // provided, it will be used to report histograms on the latency between
+ // posting the task and it being scheduled.
+ void SetupInternal(
+ base::Semaphore* on_finish, std::vector<Item*>* items,
+ size_t start_index,
+ base::Optional<AsyncTimedHistogram> gc_parallel_task_latency_histogram);
- std::vector<Item*>* items_;
- size_t cur_index_;
- size_t items_considered_;
- base::Semaphore* on_finish_;
+ // We don't allow overriding this method any further.
+ void RunInternal() final;
- friend class ItemParallelJob;
- friend class Item;
+ std::vector<Item*>* items_ = nullptr;
+ size_t cur_index_ = 0;
+ size_t items_considered_ = 0;
+ base::Semaphore* on_finish_ = nullptr;
+ base::Optional<AsyncTimedHistogram> gc_parallel_task_latency_histogram_;
DISALLOW_COPY_AND_ASSIGN(Task);
};
ItemParallelJob(CancelableTaskManager* cancelable_task_manager,
- base::Semaphore* pending_tasks)
- : cancelable_task_manager_(cancelable_task_manager),
- pending_tasks_(pending_tasks) {}
-
- ~ItemParallelJob() {
- for (size_t i = 0; i < items_.size(); i++) {
- Item* item = items_[i];
- CHECK(item->IsFinished());
- delete item;
- }
- }
+ base::Semaphore* pending_tasks);
+
+ ~ItemParallelJob();
// Adds a task to the job. Transfers ownership to the job.
void AddTask(Task* task) { tasks_.push_back(task); }
@@ -131,42 +134,9 @@ class ItemParallelJob {
int NumberOfItems() const { return static_cast<int>(items_.size()); }
int NumberOfTasks() const { return static_cast<int>(tasks_.size()); }
- void Run() {
- DCHECK_GE(tasks_.size(), 0);
- const size_t num_tasks = tasks_.size();
- const size_t num_items = items_.size();
- const size_t items_per_task = (num_items + num_tasks - 1) / num_tasks;
- CancelableTaskManager::Id* task_ids =
- new CancelableTaskManager::Id[num_tasks];
- size_t start_index = 0;
- Task* main_task = nullptr;
- Task* task = nullptr;
- for (size_t i = 0; i < num_tasks; i++, start_index += items_per_task) {
- task = tasks_[i];
- if (start_index >= num_items) {
- start_index -= num_items;
- }
- task->SetupInternal(pending_tasks_, &items_, start_index);
- task_ids[i] = task->id();
- if (i > 0) {
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
- } else {
- main_task = task;
- }
- }
- // Contribute on main thread.
- main_task->Run();
- delete main_task;
- // Wait for background tasks.
- for (size_t i = 0; i < num_tasks; i++) {
- if (cancelable_task_manager_->TryAbort(task_ids[i]) !=
- CancelableTaskManager::kTaskAborted) {
- pending_tasks_->Wait();
- }
- }
- delete[] task_ids;
- }
+  // Runs this job, reporting metrics in a thread-safe manner to
+  // |async_counters|.
+ void Run(std::shared_ptr<Counters> async_counters);
private:
std::vector<Item*> items_;
@@ -179,4 +149,4 @@ class ItemParallelJob {
} // namespace internal
} // namespace v8
-#endif // V8_HEAP_ITEM_PARALLEL_JOB_
+#endif // V8_HEAP_ITEM_PARALLEL_JOB_H_
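A minimal usage sketch of the API above; the SweepItem and SweepTask subclasses and the driver function are hypothetical, and AddItem()/GetItem() are assumed to be the item-side counterparts of the AddTask()/Run() members shown in this hunk:

// Hypothetical subclasses illustrating the Item/Task protocol: tasks pull
// items, process them, and mark each one finished.
class SweepItem : public ItemParallelJob::Item {
 public:
  // Per-item payload would live here.
};

class SweepTask : public ItemParallelJob::Task {
 public:
  explicit SweepTask(Isolate* isolate) : ItemParallelJob::Task(isolate) {}
  void RunInParallel() override {
    while (SweepItem* item = GetItem<SweepItem>()) {
      // ... process |item| ...
      item->MarkFinished();
    }
  }
};

// Ownership of items and tasks transfers to the job; Run() contributes on the
// calling thread and blocks until all background tasks finished or were
// aborted before being scheduled.
void RunSweepJob(Isolate* isolate, CancelableTaskManager* task_manager,
                 base::Semaphore* pending_tasks,
                 std::shared_ptr<Counters> counters) {
  ItemParallelJob job(task_manager, pending_tasks);
  for (int i = 0; i < 16; i++) job.AddItem(new SweepItem());
  for (int i = 0; i < 4; i++) job.AddTask(new SweepTask(isolate));
  job.Run(counters);
}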
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 30a7e55d6b..c6c8c29962 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -16,7 +16,6 @@
#include "src/global-handles.h"
#include "src/heap/array-buffer-collector.h"
#include "src/heap/array-buffer-tracker-inl.h"
-#include "src/heap/concurrent-marking.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/invalidated-slots-inl.h"
@@ -72,7 +71,8 @@ class MarkingVerifier : public ObjectVisitor, public RootVisitor {
VerifyPointers(start, end);
}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
VerifyPointers(start, end);
}
@@ -240,7 +240,8 @@ class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
VerifyPointers(start, end);
}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
VerifyPointers(start, end);
}
@@ -369,12 +370,14 @@ class RootMarkingVisitorSeedOnly : public RootVisitor {
buffered_objects_.reserve(kBufferSize);
}
- void VisitRootPointer(Root root, Object** p) override {
+ void VisitRootPointer(Root root, const char* description,
+ Object** p) override {
if (!(*p)->IsHeapObject()) return;
AddObject(*p);
}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
for (Object** p = start; p < end; p++) {
if (!(*p)->IsHeapObject()) continue;
AddObject(*p);
@@ -404,14 +407,22 @@ class RootMarkingVisitorSeedOnly : public RootVisitor {
std::vector<Object*> buffered_objects_;
};
-} // namespace
-
-static int NumberOfAvailableCores() {
- return Max(
- 1, static_cast<int>(
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
+int NumberOfAvailableCores() {
+ static int num_cores =
+ static_cast<int>(
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()) +
+ 1;
+ // This number of cores should be greater than zero and never change.
+ DCHECK_GE(num_cores, 1);
+ DCHECK_EQ(
+ num_cores,
+ 1 + static_cast<int>(
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
+ return num_cores;
}
+} // namespace
+
int MarkCompactCollectorBase::NumberOfParallelCompactionTasks(int pages) {
DCHECK_GT(pages, 0);
return FLAG_parallel_compaction ? Min(NumberOfAvailableCores(), pages) : 1;
@@ -855,7 +866,7 @@ void MarkCompactCollector::Prepare() {
if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
heap()->incremental_marking()->Stop();
heap()->incremental_marking()->AbortBlackAllocation();
- FinishConcurrentMarking();
+ FinishConcurrentMarking(ConcurrentMarking::StopRequest::PREEMPT_TASKS);
heap()->incremental_marking()->Deactivate();
ClearMarkbits();
AbortWeakCollections();
@@ -891,9 +902,10 @@ void MarkCompactCollector::Prepare() {
#endif
}
-void MarkCompactCollector::FinishConcurrentMarking() {
+void MarkCompactCollector::FinishConcurrentMarking(
+ ConcurrentMarking::StopRequest stop_request) {
if (FLAG_concurrent_marking) {
- heap()->concurrent_marking()->EnsureCompleted();
+ heap()->concurrent_marking()->Stop(stop_request);
heap()->concurrent_marking()->FlushLiveBytes(non_atomic_marking_state());
}
}
@@ -965,11 +977,12 @@ class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
explicit RootMarkingVisitor(MarkCompactCollector* collector)
: collector_(collector) {}
- void VisitRootPointer(Root root, Object** p) final {
+ void VisitRootPointer(Root root, const char* description, Object** p) final {
MarkObjectByPointer(root, p);
}
- void VisitRootPointers(Root root, Object** start, Object** end) final {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) final {
for (Object** p = start; p < end; p++) MarkObjectByPointer(root, p);
}
@@ -1058,7 +1071,8 @@ class ExternalStringTableCleaner : public RootVisitor {
public:
explicit ExternalStringTableCleaner(Heap* heap) : heap_(heap) {}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
// Visit all HeapObject pointers in [start, end).
MarkCompactCollector::NonAtomicMarkingState* marking_state =
heap_->mark_compact_collector()->non_atomic_marking_state();
@@ -1093,7 +1107,8 @@ class YoungGenerationExternalStringTableCleaner : public RootVisitor {
: heap_(collector->heap()),
marking_state_(collector->non_atomic_marking_state()) {}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
DCHECK_EQ(static_cast<int>(root),
static_cast<int>(Root::kExternalStringsTable));
// Visit all HeapObject pointers in [start, end).
@@ -1391,7 +1406,8 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
#ifdef VERIFY_HEAP
if (AbortCompactionForTesting(object)) return false;
#endif // VERIFY_HEAP
- AllocationAlignment alignment = object->RequiredAlignment();
+ AllocationAlignment alignment =
+ HeapObject::RequiredAlignment(object->map());
AllocationResult allocation =
local_allocator_->Allocate(target_space, size, alignment);
if (allocation.To(target_object)) {
@@ -1496,7 +1512,8 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
inline AllocationSpace AllocateTargetObject(HeapObject* old_object, int size,
HeapObject** target_object) {
- AllocationAlignment alignment = old_object->RequiredAlignment();
+ AllocationAlignment alignment =
+ HeapObject::RequiredAlignment(old_object->map());
AllocationSpace space_allocated_in = NEW_SPACE;
AllocationResult allocation =
local_allocator_->Allocate(NEW_SPACE, size, alignment);
@@ -1758,11 +1775,13 @@ class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
: collector_(collector),
marking_state_(collector_->non_atomic_marking_state()) {}
- void VisitRootPointer(Root root, Object** p) override {
+ void VisitRootPointer(Root root, const char* description,
+ Object** p) override {
MarkObjectByPointer(p);
}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
}
@@ -1883,6 +1902,8 @@ class BatchedRootMarkingItem : public MarkingItem {
virtual ~BatchedRootMarkingItem() {}
void Process(YoungGenerationMarkingTask* task) override {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "BatchedRootMarkingItem::Process");
for (Object* object : objects_) {
task->MarkObject(object);
}
@@ -1900,6 +1921,8 @@ class PageMarkingItem : public MarkingItem {
virtual ~PageMarkingItem() { global_slots_->Increment(slots_); }
void Process(YoungGenerationMarkingTask* task) override {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "PageMarkingItem::Process");
base::LockGuard<base::Mutex> guard(chunk_->mutex());
MarkUntypedPointers(task);
MarkTypedPointers(task);
@@ -1956,6 +1979,8 @@ class GlobalHandlesMarkingItem : public MarkingItem {
virtual ~GlobalHandlesMarkingItem() {}
void Process(YoungGenerationMarkingTask* task) override {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "GlobalHandlesMarkingItem::Process");
GlobalHandlesRootMarkingVisitor visitor(task);
global_handles_
->IterateNewSpaceStrongAndDependentRootsAndIdentifyUnmodified(
@@ -1968,12 +1993,14 @@ class GlobalHandlesMarkingItem : public MarkingItem {
explicit GlobalHandlesRootMarkingVisitor(YoungGenerationMarkingTask* task)
: task_(task) {}
- void VisitRootPointer(Root root, Object** p) override {
+ void VisitRootPointer(Root root, const char* description,
+ Object** p) override {
DCHECK_EQ(Root::kGlobalHandles, root);
task_->MarkObject(*p);
}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
DCHECK_EQ(Root::kGlobalHandles, root);
for (Object** p = start; p < end; p++) {
task_->MarkObject(*p);
@@ -2061,7 +2088,7 @@ void MinorMarkCompactCollector::MarkRootSetInParallel() {
job.AddTask(
new YoungGenerationMarkingTask(isolate(), this, worklist(), i));
}
- job.Run();
+ job.Run(isolate()->async_counters());
DCHECK(worklist()->IsGlobalEmpty());
}
}
@@ -2336,7 +2363,8 @@ void MarkCompactCollector::MarkLiveObjects() {
}
ProcessMarkingWorklist();
- FinishConcurrentMarking();
+ FinishConcurrentMarking(
+ ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
ProcessMarkingWorklist();
}
@@ -2849,11 +2877,13 @@ class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
for (Object** p = start; p < end; p++) UpdateSlotInternal(p);
}
- void VisitRootPointer(Root root, Object** p) override {
+ void VisitRootPointer(Root root, const char* description,
+ Object** p) override {
UpdateSlotInternal(p);
}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
for (Object** p = start; p < end; p++) UpdateSlotInternal(p);
}
@@ -3009,6 +3039,7 @@ class Evacuator : public Malloced {
};
void Evacuator::EvacuatePage(Page* page) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Evacuator::EvacuatePage");
DCHECK(page->SweepingDone());
intptr_t saved_live_bytes = 0;
double evacuation_time = 0.0;
@@ -3066,11 +3097,15 @@ class FullEvacuator : public Evacuator {
};
void FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) {
+ const EvacuationMode evacuation_mode = ComputeEvacuationMode(page);
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "FullEvacuator::RawEvacuatePage", "evacuation_mode",
+ evacuation_mode);
MarkCompactCollector::NonAtomicMarkingState* marking_state =
collector_->non_atomic_marking_state();
*live_bytes = marking_state->live_bytes(page);
HeapObject* failed_object = nullptr;
- switch (ComputeEvacuationMode(page)) {
+ switch (evacuation_mode) {
case kObjectsNewToOld:
LiveObjectVisitor::VisitBlackObjectsNoFail(
page, marking_state, &new_space_visitor_,
@@ -3127,6 +3162,8 @@ class YoungGenerationEvacuator : public Evacuator {
void YoungGenerationEvacuator::RawEvacuatePage(Page* page,
intptr_t* live_bytes) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "YoungGenerationEvacuator::RawEvacuatePage");
MinorMarkCompactCollector::NonAtomicMarkingState* marking_state =
collector_->non_atomic_marking_state();
*live_bytes = marking_state->live_bytes(page);
@@ -3241,7 +3278,7 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
evacuators[i]->AddObserver(migration_observer);
job->AddTask(new PageEvacuationTask(heap()->isolate(), evacuators[i]));
}
- job->Run();
+ job->Run(isolate()->async_counters());
for (int i = 0; i < wanted_num_tasks; i++) {
evacuators[i]->Finalize();
delete evacuators[i];
@@ -3249,15 +3286,16 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
delete[] evacuators;
if (FLAG_trace_evacuation) {
- PrintIsolate(isolate(),
- "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
- "wanted_tasks=%d tasks=%d cores=%" PRIuS
- " live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n",
- isolate()->time_millis_since_init(),
- FLAG_parallel_compaction ? "yes" : "no", job->NumberOfItems(),
- wanted_num_tasks, job->NumberOfTasks(),
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
- live_bytes, compaction_speed);
+ PrintIsolate(
+ isolate(),
+ "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
+ "wanted_tasks=%d tasks=%d cores=%" PRIuS " live_bytes=%" V8PRIdPTR
+ " compaction_speed=%.f\n",
+ isolate()->time_millis_since_init(),
+ FLAG_parallel_compaction ? "yes" : "no", job->NumberOfItems(),
+ wanted_num_tasks, job->NumberOfTasks(),
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads() + 1,
+ live_bytes, compaction_speed);
}
}
@@ -3365,6 +3403,8 @@ bool LiveObjectVisitor::VisitBlackObjects(MemoryChunk* chunk,
Visitor* visitor,
IterationMode iteration_mode,
HeapObject** failed_object) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "LiveObjectVisitor::VisitBlackObjects");
for (auto object_and_size :
LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
HeapObject* const object = object_and_size.first;
@@ -3389,6 +3429,8 @@ void LiveObjectVisitor::VisitBlackObjectsNoFail(MemoryChunk* chunk,
MarkingState* marking_state,
Visitor* visitor,
IterationMode iteration_mode) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "LiveObjectVisitor::VisitBlackObjectsNoFail");
for (auto object_and_size :
LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
HeapObject* const object = object_and_size.first;
@@ -3407,6 +3449,8 @@ void LiveObjectVisitor::VisitGreyObjectsNoFail(MemoryChunk* chunk,
MarkingState* marking_state,
Visitor* visitor,
IterationMode iteration_mode) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "LiveObjectVisitor::VisitGreyObjectsNoFail");
for (auto object_and_size :
LiveObjectRange<kGreyObjects>(chunk, marking_state->bitmap(chunk))) {
HeapObject* const object = object_and_size.first;
@@ -3553,6 +3597,8 @@ class ToSpaceUpdatingItem : public UpdatingItem {
private:
void ProcessVisitAll() {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "ToSpaceUpdatingItem::ProcessVisitAll");
PointersUpdatingVisitor visitor;
for (Address cur = start_; cur < end_;) {
HeapObject* object = HeapObject::FromAddress(cur);
@@ -3564,6 +3610,8 @@ class ToSpaceUpdatingItem : public UpdatingItem {
}
void ProcessVisitLive() {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "ToSpaceUpdatingItem::ProcessVisitLive");
// For young generation evacuations we want to visit grey objects, for
// full MC, we need to visit black objects.
PointersUpdatingVisitor visitor;
@@ -3592,13 +3640,14 @@ class RememberedSetUpdatingItem : public UpdatingItem {
virtual ~RememberedSetUpdatingItem() {}
void Process() override {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "RememberedSetUpdatingItem::Process");
base::LockGuard<base::Mutex> guard(chunk_->mutex());
UpdateUntypedPointers();
UpdateTypedPointers();
}
private:
- template <AccessMode access_mode>
inline SlotCallbackResult CheckAndUpdateOldToNewSlot(Address slot_address) {
Object** slot = reinterpret_cast<Object**>(slot_address);
if (heap_->InFromSpace(*slot)) {
@@ -3606,13 +3655,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
DCHECK(heap_object->IsHeapObject());
MapWord map_word = heap_object->map_word();
if (map_word.IsForwardingAddress()) {
- if (access_mode == AccessMode::ATOMIC) {
- HeapObject** heap_obj_slot = reinterpret_cast<HeapObject**>(slot);
- base::AsAtomicPointer::Relaxed_Store(heap_obj_slot,
- map_word.ToForwardingAddress());
- } else {
- *slot = map_word.ToForwardingAddress();
- }
+ *slot = map_word.ToForwardingAddress();
}
// If the object was in from space before and is after executing the
// callback in to space, the object is still live.
@@ -3648,12 +3691,10 @@ class RememberedSetUpdatingItem : public UpdatingItem {
void UpdateUntypedPointers() {
if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
- RememberedSet<OLD_TO_NEW>::Iterate(
- chunk_,
- [this](Address slot) {
- return CheckAndUpdateOldToNewSlot<AccessMode::NON_ATOMIC>(slot);
- },
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ chunk_,
+ [this](Address slot) { return CheckAndUpdateOldToNewSlot(slot); },
+ SlotSet::PREFREE_EMPTY_BUCKETS);
}
if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
(chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
@@ -3692,7 +3733,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
[isolate, this](SlotType slot_type, Address host_addr, Address slot) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
isolate, slot_type, slot, [this](Object** slot) {
- return CheckAndUpdateOldToNewSlot<AccessMode::NON_ATOMIC>(
+ return CheckAndUpdateOldToNewSlot(
reinterpret_cast<Address>(slot));
});
});
@@ -3748,6 +3789,8 @@ class GlobalHandlesUpdatingItem : public UpdatingItem {
virtual ~GlobalHandlesUpdatingItem() {}
void Process() override {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "GlobalHandlesUpdatingItem::Process");
PointersUpdatingVisitor updating_visitor;
global_handles_->IterateNewSpaceRoots(&updating_visitor, start_, end_);
}
@@ -3772,6 +3815,9 @@ class ArrayBufferTrackerUpdatingItem : public UpdatingItem {
virtual ~ArrayBufferTrackerUpdatingItem() {}
void Process() override {
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "ArrayBufferTrackerUpdatingItem::Process", "EvacuationState",
+ state_);
switch (state_) {
case EvacuationState::kRegular:
ArrayBufferTracker::ProcessBuffers(
@@ -3922,7 +3968,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
isolate(),
GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
}
- updating_job.Run();
+ updating_job.Run(isolate()->async_counters());
}
{
@@ -3954,7 +4000,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
isolate(),
GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
}
- updating_job.Run();
+ updating_job.Run(isolate()->async_counters());
heap()->array_buffer_collector()->FreeAllocationsOnBackgroundThread();
}
}
@@ -4016,7 +4062,7 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
- updating_job.Run();
+ updating_job.Run(isolate()->async_counters());
heap()->array_buffer_collector()->FreeAllocationsOnBackgroundThread();
}
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 6fda00633c..755f0eb4eb 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -8,6 +8,7 @@
#include <deque>
#include <vector>
+#include "src/heap/concurrent-marking.h"
#include "src/heap/marking.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
@@ -649,7 +650,9 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// choosing spaces to compact.
void Prepare();
- void FinishConcurrentMarking();
+ // Stop concurrent marking (either by preempting it right away or waiting for
+ // it to complete as requested by |stop_request|).
+ void FinishConcurrentMarking(ConcurrentMarking::StopRequest stop_request);
bool StartCompaction();
diff --git a/deps/v8/src/heap/marking.h b/deps/v8/src/heap/marking.h
index 9b1fe61236..58630c52f0 100644
--- a/deps/v8/src/heap/marking.h
+++ b/deps/v8/src/heap/marking.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MARKING_H
-#define V8_MARKING_H
+#ifndef V8_HEAP_MARKING_H_
+#define V8_HEAP_MARKING_H_
#include "src/base/atomic-utils.h"
#include "src/utils.h"
@@ -316,4 +316,4 @@ class Marking : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_MARKING_H_
+#endif // V8_HEAP_MARKING_H_
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index cc1030846a..77317a7b8a 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -201,6 +201,7 @@ MemoryReducer::State MemoryReducer::Step(const State& state,
void MemoryReducer::ScheduleTimer(double time_ms, double delay_ms) {
+ if (!heap()->use_tasks()) return;
DCHECK_LT(0, delay_ms);
// Leave some room for precision error in task scheduler.
const double kSlackMs = 100;
diff --git a/deps/v8/src/heap/memory-reducer.h b/deps/v8/src/heap/memory-reducer.h
index 0f0ad6eaa0..ce6564596e 100644
--- a/deps/v8/src/heap/memory-reducer.h
+++ b/deps/v8/src/heap/memory-reducer.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HEAP_memory_reducer_H
-#define V8_HEAP_memory_reducer_H
+#ifndef V8_HEAP_MEMORY_REDUCER_H_
+#define V8_HEAP_MEMORY_REDUCER_H_
#include "include/v8-platform.h"
#include "src/base/macros.h"
@@ -171,4 +171,4 @@ class V8_EXPORT_PRIVATE MemoryReducer {
} // namespace internal
} // namespace v8
-#endif // V8_HEAP_memory_reducer_H
+#endif // V8_HEAP_MEMORY_REDUCER_H_
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index f58a472671..b854dabb2c 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -1,4 +1,5 @@
// Copyright 2015 the V8 project authors. All rights reserved.
+//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -12,6 +13,7 @@
#include "src/counters.h"
#include "src/globals.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/mark-compact.h"
#include "src/isolate.h"
#include "src/objects/compilation-cache-inl.h"
#include "src/utils.h"
@@ -31,7 +33,6 @@ void ObjectStats::ClearObjectStats(bool clear_last_time_stats) {
memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
}
- visited_fixed_array_sub_types_.clear();
}
// Tell the compiler to never inline this: occasionally, the optimizer will
@@ -99,23 +100,14 @@ void ObjectStats::PrintJSON(const char* key) {
#define INSTANCE_TYPE_WRAPPER(name) \
PrintInstanceTypeJSON(key, gc_count, #name, name);
-#define CODE_KIND_WRAPPER(name) \
- PrintInstanceTypeJSON(key, gc_count, "*CODE_" #name, \
- FIRST_CODE_KIND_SUB_TYPE + Code::name);
-#define FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER(name) \
- PrintInstanceTypeJSON(key, gc_count, "*FIXED_ARRAY_" #name, \
- FIRST_FIXED_ARRAY_SUB_TYPE + name);
+
#define VIRTUAL_INSTANCE_TYPE_WRAPPER(name) \
PrintInstanceTypeJSON(key, gc_count, #name, FIRST_VIRTUAL_TYPE + name);
INSTANCE_TYPE_LIST(INSTANCE_TYPE_WRAPPER)
- CODE_KIND_LIST(CODE_KIND_WRAPPER)
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER)
VIRTUAL_INSTANCE_TYPE_LIST(VIRTUAL_INSTANCE_TYPE_WRAPPER)
#undef INSTANCE_TYPE_WRAPPER
-#undef CODE_KIND_WRAPPER
-#undef FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER
#undef VIRTUAL_INSTANCE_TYPE_WRAPPER
}
@@ -150,25 +142,15 @@ void ObjectStats::Dump(std::stringstream& stream) {
stream << "\"type_data\":{";
#define INSTANCE_TYPE_WRAPPER(name) DumpInstanceTypeData(stream, #name, name);
-#define CODE_KIND_WRAPPER(name) \
- DumpInstanceTypeData(stream, "*CODE_" #name, \
- FIRST_CODE_KIND_SUB_TYPE + Code::name);
-#define FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER(name) \
- DumpInstanceTypeData(stream, "*FIXED_ARRAY_" #name, \
- FIRST_FIXED_ARRAY_SUB_TYPE + name);
#define VIRTUAL_INSTANCE_TYPE_WRAPPER(name) \
DumpInstanceTypeData(stream, #name, FIRST_VIRTUAL_TYPE + name);
INSTANCE_TYPE_LIST(INSTANCE_TYPE_WRAPPER);
- CODE_KIND_LIST(CODE_KIND_WRAPPER);
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER);
VIRTUAL_INSTANCE_TYPE_LIST(VIRTUAL_INSTANCE_TYPE_WRAPPER)
stream << "\"END\":{}}}";
#undef INSTANCE_TYPE_WRAPPER
-#undef CODE_KIND_WRAPPER
-#undef FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER
#undef VIRTUAL_INSTANCE_TYPE_WRAPPER
}
@@ -202,93 +184,88 @@ void ObjectStats::RecordObjectStats(InstanceType type, size_t size) {
}
void ObjectStats::RecordVirtualObjectStats(VirtualInstanceType type,
- size_t size) {
+ size_t size, size_t over_allocated) {
DCHECK_LE(type, LAST_VIRTUAL_TYPE);
object_counts_[FIRST_VIRTUAL_TYPE + type]++;
object_sizes_[FIRST_VIRTUAL_TYPE + type] += size;
size_histogram_[FIRST_VIRTUAL_TYPE + type][HistogramIndexFromSize(size)]++;
-}
-
-void ObjectStats::RecordCodeSubTypeStats(int code_sub_type, size_t size) {
- int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type;
- DCHECK_GE(code_sub_type_index, FIRST_CODE_KIND_SUB_TYPE);
- DCHECK_LT(code_sub_type_index, FIRST_FIXED_ARRAY_SUB_TYPE);
- object_counts_[code_sub_type_index]++;
- object_sizes_[code_sub_type_index] += size;
- size_histogram_[code_sub_type_index][HistogramIndexFromSize(size)]++;
-}
-
-bool ObjectStats::RecordFixedArraySubTypeStats(FixedArrayBase* array,
- int array_sub_type, size_t size,
- size_t over_allocated) {
- auto it = visited_fixed_array_sub_types_.insert(array);
- if (!it.second) return false;
- DCHECK_LE(array_sub_type, LAST_FIXED_ARRAY_SUB_TYPE);
- object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]++;
- object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] += size;
- size_histogram_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]
- [HistogramIndexFromSize(size)]++;
- if (over_allocated > 0) {
- InstanceType type =
- array->IsHashTable() ? HASH_TABLE_TYPE : FIXED_ARRAY_TYPE;
- over_allocated_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] +=
- over_allocated;
- over_allocated_histogram_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]
- [HistogramIndexFromSize(over_allocated)]++;
- over_allocated_[type] += over_allocated;
- over_allocated_histogram_[type][HistogramIndexFromSize(over_allocated)]++;
- }
- return true;
+ over_allocated_[FIRST_VIRTUAL_TYPE + type] += over_allocated;
+ over_allocated_histogram_[FIRST_VIRTUAL_TYPE + type]
+ [HistogramIndexFromSize(size)]++;
}
Isolate* ObjectStats::isolate() { return heap()->isolate(); }
class ObjectStatsCollectorImpl {
public:
+ enum Phase {
+ kPhase1,
+ kPhase2,
+ };
+ static const int kNumberOfPhases = kPhase2 + 1;
+
ObjectStatsCollectorImpl(Heap* heap, ObjectStats* stats);
void CollectGlobalStatistics();
+ void CollectStatistics(HeapObject* obj, Phase phase);
- // Collects statistics of objects for virtual instance types.
- void CollectVirtualStatistics(HeapObject* obj);
+ private:
+ enum CowMode {
+ kCheckCow,
+ kIgnoreCow,
+ };
- // Collects statistics of objects for regular instance types.
- void CollectStatistics(HeapObject* obj);
+ Isolate* isolate() { return heap_->isolate(); }
- private:
- class CompilationCacheTableVisitor;
+ bool RecordVirtualObjectStats(HeapObject* parent, HeapObject* obj,
+ ObjectStats::VirtualInstanceType type,
+ size_t size, size_t over_allocated,
+ CowMode check_cow_array = kCheckCow);
+  // Gets the size from |obj| and assumes no over-allocation.
+ bool RecordSimpleVirtualObjectStats(HeapObject* parent, HeapObject* obj,
+ ObjectStats::VirtualInstanceType type);
+ // For HashTable it is possible to compute over allocated memory.
+ void RecordHashTableVirtualObjectStats(HeapObject* parent,
+ FixedArray* hash_table,
+ ObjectStats::VirtualInstanceType type);
- void RecordObjectStats(HeapObject* obj, InstanceType type, size_t size);
- void RecordBytecodeArrayDetails(BytecodeArray* obj);
- void RecordCodeDetails(Code* code);
- void RecordFixedArrayDetails(FixedArray* array);
- void RecordJSCollectionDetails(JSObject* obj);
- void RecordJSObjectDetails(JSObject* object);
- void RecordJSWeakCollectionDetails(JSWeakCollection* obj);
- void RecordMapDetails(Map* map);
- void RecordScriptDetails(Script* obj);
- void RecordTemplateInfoDetails(TemplateInfo* obj);
- void RecordSharedFunctionInfoDetails(SharedFunctionInfo* sfi);
-
- bool RecordFixedArrayHelper(HeapObject* parent, FixedArray* array,
- int subtype, size_t overhead);
- void RecursivelyRecordFixedArrayHelper(HeapObject* parent, FixedArray* array,
- int subtype);
- template <class HashTable>
- void RecordHashTableHelper(HeapObject* parent, HashTable* array, int subtype);
bool SameLiveness(HeapObject* obj1, HeapObject* obj2);
+ bool CanRecordFixedArray(FixedArrayBase* array);
+ bool IsCowArray(FixedArrayBase* array);
- void RecordVirtualObjectStats(HeapObject* obj,
- ObjectStats::VirtualInstanceType type,
- size_t size);
+  // Blacklist for objects that should not be recorded using
+  // VirtualObjectStats and RecordSimpleVirtualObjectStats. For those objects,
+  // dispatch to the low-level ObjectStats::RecordObjectStats manually.
+ bool ShouldRecordObject(HeapObject* object, CowMode check_cow_array);
+
+ void RecordObjectStats(HeapObject* obj, InstanceType type, size_t size);
+
+ // Specific recursion into constant pool or embedded code objects. Records
+ // FixedArrays and Tuple2 that look like ConstantElementsPair.
+ void RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
+ HeapObject* parent, HeapObject* object,
+ ObjectStats::VirtualInstanceType type);
+
+ // Details.
void RecordVirtualAllocationSiteDetails(AllocationSite* site);
+ void RecordVirtualBytecodeArrayDetails(BytecodeArray* bytecode);
+ void RecordVirtualCodeDetails(Code* code);
+ void RecordVirtualContext(Context* context);
+ void RecordVirtualFeedbackVectorDetails(FeedbackVector* vector);
+ void RecordVirtualFixedArrayDetails(FixedArray* array);
+ void RecordVirtualFunctionTemplateInfoDetails(FunctionTemplateInfo* fti);
+ void RecordVirtualJSGlobalObjectDetails(JSGlobalObject* object);
+ void RecordVirtualJSCollectionDetails(JSObject* object);
+ void RecordVirtualJSObjectDetails(JSObject* object);
+ void RecordVirtualMapDetails(Map* map);
+ void RecordVirtualScriptDetails(Script* script);
+ void RecordVirtualSharedFunctionInfoDetails(SharedFunctionInfo* info);
+ void RecordVirtualJSFunctionDetails(JSFunction* function);
Heap* heap_;
ObjectStats* stats_;
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
std::unordered_set<HeapObject*> virtual_objects_;
-
- friend class ObjectStatsCollectorImpl::CompilationCacheTableVisitor;
};
ObjectStatsCollectorImpl::ObjectStatsCollectorImpl(Heap* heap,
@@ -298,18 +275,45 @@ ObjectStatsCollectorImpl::ObjectStatsCollectorImpl(Heap* heap,
marking_state_(
heap->mark_compact_collector()->non_atomic_marking_state()) {}
-// For entries which shared the same instance type (historically FixedArrays)
-// we do a pre-pass and create virtual instance types.
-void ObjectStatsCollectorImpl::CollectVirtualStatistics(HeapObject* obj) {
- if (obj->IsAllocationSite()) {
- RecordVirtualAllocationSiteDetails(AllocationSite::cast(obj));
+bool ObjectStatsCollectorImpl::ShouldRecordObject(HeapObject* obj,
+ CowMode check_cow_array) {
+ if (obj->IsFixedArray()) {
+ FixedArray* fixed_array = FixedArray::cast(obj);
+ bool cow_check = check_cow_array == kIgnoreCow || !IsCowArray(fixed_array);
+ return CanRecordFixedArray(fixed_array) && cow_check;
}
+ if (obj == heap_->empty_property_array()) return false;
+ return true;
+}
+
+void ObjectStatsCollectorImpl::RecordHashTableVirtualObjectStats(
+ HeapObject* parent, FixedArray* hash_table,
+ ObjectStats::VirtualInstanceType type) {
+ CHECK(hash_table->IsHashTable());
+ // TODO(mlippautz): Implement over allocation for hash tables.
+ RecordVirtualObjectStats(parent, hash_table, type, hash_table->Size(),
+ ObjectStats::kNoOverAllocation);
}
-void ObjectStatsCollectorImpl::RecordVirtualObjectStats(
- HeapObject* obj, ObjectStats::VirtualInstanceType type, size_t size) {
- virtual_objects_.insert(obj);
- stats_->RecordVirtualObjectStats(type, size);
+bool ObjectStatsCollectorImpl::RecordSimpleVirtualObjectStats(
+ HeapObject* parent, HeapObject* obj,
+ ObjectStats::VirtualInstanceType type) {
+ return RecordVirtualObjectStats(parent, obj, type, obj->Size(),
+ ObjectStats::kNoOverAllocation, kCheckCow);
+}
+
+bool ObjectStatsCollectorImpl::RecordVirtualObjectStats(
+ HeapObject* parent, HeapObject* obj, ObjectStats::VirtualInstanceType type,
+ size_t size, size_t over_allocated, CowMode check_cow_array) {
+ if (!SameLiveness(parent, obj) || !ShouldRecordObject(obj, check_cow_array))
+ return false;
+
+ if (virtual_objects_.find(obj) == virtual_objects_.end()) {
+ virtual_objects_.insert(obj);
+ stats_->RecordVirtualObjectStats(type, size, over_allocated);
+ return true;
+ }
+ return false;
}
void ObjectStatsCollectorImpl::RecordVirtualAllocationSiteDetails(
@@ -317,141 +321,290 @@ void ObjectStatsCollectorImpl::RecordVirtualAllocationSiteDetails(
if (!site->PointsToLiteral()) return;
JSObject* boilerplate = site->boilerplate();
if (boilerplate->IsJSArray()) {
- RecordVirtualObjectStats(boilerplate,
- ObjectStats::JS_ARRAY_BOILERPLATE_TYPE,
- boilerplate->Size());
+ RecordSimpleVirtualObjectStats(site, boilerplate,
+ ObjectStats::JS_ARRAY_BOILERPLATE_TYPE);
// Array boilerplates cannot have properties.
} else {
- RecordVirtualObjectStats(boilerplate,
- ObjectStats::JS_OBJECT_BOILERPLATE_TYPE,
- boilerplate->Size());
+ RecordVirtualObjectStats(
+ site, boilerplate, ObjectStats::JS_OBJECT_BOILERPLATE_TYPE,
+ boilerplate->Size(), ObjectStats::kNoOverAllocation);
if (boilerplate->HasFastProperties()) {
- // We'll misclassify the empty_proeprty_array here. Given that there is a
- // single instance, this is neglible.
+ // We'll mis-classify the empty_property_array here. Given that there is a
+ // single instance, this is negligible.
PropertyArray* properties = boilerplate->property_array();
- RecordVirtualObjectStats(properties,
- ObjectStats::BOILERPLATE_PROPERTY_ARRAY_TYPE,
- properties->Size());
+ RecordSimpleVirtualObjectStats(
+ site, properties, ObjectStats::BOILERPLATE_PROPERTY_ARRAY_TYPE);
} else {
NameDictionary* properties = boilerplate->property_dictionary();
- RecordVirtualObjectStats(properties,
- ObjectStats::BOILERPLATE_NAME_DICTIONARY_TYPE,
- properties->Size());
+ RecordSimpleVirtualObjectStats(
+ site, properties, ObjectStats::BOILERPLATE_PROPERTY_DICTIONARY_TYPE);
}
}
FixedArrayBase* elements = boilerplate->elements();
- // We skip COW elements since they are shared, and we are sure that if the
- // boilerplate exists there must have been at least one instantiation.
- if (!elements->IsCowArray()) {
- RecordVirtualObjectStats(elements, ObjectStats::BOILERPLATE_ELEMENTS_TYPE,
- elements->Size());
- }
+ RecordSimpleVirtualObjectStats(site, elements,
+ ObjectStats::BOILERPLATE_ELEMENTS_TYPE);
}
-void ObjectStatsCollectorImpl::CollectStatistics(HeapObject* obj) {
- Map* map = obj->map();
-
- // Record for the InstanceType.
- int object_size = obj->Size();
- RecordObjectStats(obj, map->instance_type(), object_size);
-
- // Record specific sub types where possible.
- if (obj->IsMap()) RecordMapDetails(Map::cast(obj));
- if (obj->IsObjectTemplateInfo() || obj->IsFunctionTemplateInfo()) {
- RecordTemplateInfoDetails(TemplateInfo::cast(obj));
+void ObjectStatsCollectorImpl::RecordVirtualFunctionTemplateInfoDetails(
+ FunctionTemplateInfo* fti) {
+ // named_property_handler and indexed_property_handler are recorded as
+ // INTERCEPTOR_INFO_TYPE.
+ if (!fti->call_code()->IsUndefined(isolate())) {
+ RecordSimpleVirtualObjectStats(
+ fti, CallHandlerInfo::cast(fti->call_code()),
+ ObjectStats::FUNCTION_TEMPLATE_INFO_ENTRIES_TYPE);
}
- if (obj->IsBytecodeArray()) {
- RecordBytecodeArrayDetails(BytecodeArray::cast(obj));
+ if (!fti->instance_call_handler()->IsUndefined(isolate())) {
+ RecordSimpleVirtualObjectStats(
+ fti, CallHandlerInfo::cast(fti->instance_call_handler()),
+ ObjectStats::FUNCTION_TEMPLATE_INFO_ENTRIES_TYPE);
}
- if (obj->IsCode()) RecordCodeDetails(Code::cast(obj));
- if (obj->IsSharedFunctionInfo()) {
- RecordSharedFunctionInfoDetails(SharedFunctionInfo::cast(obj));
+}
+
+void ObjectStatsCollectorImpl::RecordVirtualJSGlobalObjectDetails(
+ JSGlobalObject* object) {
+ // Properties.
+ GlobalDictionary* properties = object->global_dictionary();
+ RecordHashTableVirtualObjectStats(object, properties,
+ ObjectStats::GLOBAL_PROPERTIES_TYPE);
+ // Elements.
+ FixedArrayBase* elements = object->elements();
+ RecordSimpleVirtualObjectStats(object, elements,
+ ObjectStats::GLOBAL_ELEMENTS_TYPE);
+}
+
+void ObjectStatsCollectorImpl::RecordVirtualJSCollectionDetails(
+ JSObject* object) {
+ if (object->IsJSMap()) {
+ RecordSimpleVirtualObjectStats(
+ object, FixedArray::cast(JSMap::cast(object)->table()),
+ ObjectStats::JS_COLLETION_TABLE_TYPE);
}
- if (obj->IsFixedArray()) RecordFixedArrayDetails(FixedArray::cast(obj));
- if (obj->IsJSObject()) RecordJSObjectDetails(JSObject::cast(obj));
- if (obj->IsJSWeakCollection()) {
- RecordJSWeakCollectionDetails(JSWeakCollection::cast(obj));
+ if (object->IsJSSet()) {
+ RecordSimpleVirtualObjectStats(
+ object, FixedArray::cast(JSSet::cast(object)->table()),
+ ObjectStats::JS_COLLETION_TABLE_TYPE);
}
- if (obj->IsJSCollection()) {
- RecordJSCollectionDetails(JSObject::cast(obj));
+}
+
+void ObjectStatsCollectorImpl::RecordVirtualJSObjectDetails(JSObject* object) {
+ // JSGlobalObject is recorded separately.
+ if (object->IsJSGlobalObject()) return;
+
+ // Properties.
+ if (object->HasFastProperties()) {
+ PropertyArray* properties = object->property_array();
+ CHECK_EQ(PROPERTY_ARRAY_TYPE, properties->map()->instance_type());
+ } else {
+ NameDictionary* properties = object->property_dictionary();
+ RecordHashTableVirtualObjectStats(
+ object, properties, ObjectStats::OBJECT_PROPERTY_DICTIONARY_TYPE);
}
- if (obj->IsScript()) RecordScriptDetails(Script::cast(obj));
+ // Elements.
+ FixedArrayBase* elements = object->elements();
+ RecordSimpleVirtualObjectStats(object, elements, ObjectStats::ELEMENTS_TYPE);
}
-class ObjectStatsCollectorImpl::CompilationCacheTableVisitor
- : public RootVisitor {
- public:
- explicit CompilationCacheTableVisitor(ObjectStatsCollectorImpl* parent)
- : parent_(parent) {}
-
- void VisitRootPointers(Root root, Object** start, Object** end) override {
- for (Object** current = start; current < end; current++) {
- HeapObject* obj = HeapObject::cast(*current);
- if (obj->IsUndefined(parent_->heap_->isolate())) continue;
- CHECK(obj->IsCompilationCacheTable());
- parent_->RecordHashTableHelper(nullptr, CompilationCacheTable::cast(obj),
- COMPILATION_CACHE_TABLE_SUB_TYPE);
+static ObjectStats::VirtualInstanceType GetFeedbackSlotType(
+ Object* obj, FeedbackSlotKind kind, Isolate* isolate) {
+ switch (kind) {
+ case FeedbackSlotKind::kCall:
+ if (obj == *isolate->factory()->uninitialized_symbol() ||
+ obj == *isolate->factory()->premonomorphic_symbol()) {
+ return ObjectStats::FEEDBACK_VECTOR_SLOT_CALL_UNUSED_TYPE;
+ }
+ return ObjectStats::FEEDBACK_VECTOR_SLOT_CALL_TYPE;
+
+ case FeedbackSlotKind::kLoadProperty:
+ case FeedbackSlotKind::kLoadGlobalInsideTypeof:
+ case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+ case FeedbackSlotKind::kLoadKeyed:
+ if (obj == *isolate->factory()->uninitialized_symbol() ||
+ obj == *isolate->factory()->premonomorphic_symbol()) {
+ return ObjectStats::FEEDBACK_VECTOR_SLOT_LOAD_UNUSED_TYPE;
+ }
+ return ObjectStats::FEEDBACK_VECTOR_SLOT_LOAD_TYPE;
+
+ case FeedbackSlotKind::kStoreNamedSloppy:
+ case FeedbackSlotKind::kStoreNamedStrict:
+ case FeedbackSlotKind::kStoreOwnNamed:
+ case FeedbackSlotKind::kStoreGlobalSloppy:
+ case FeedbackSlotKind::kStoreGlobalStrict:
+ case FeedbackSlotKind::kStoreKeyedSloppy:
+ case FeedbackSlotKind::kStoreKeyedStrict:
+ if (obj == *isolate->factory()->uninitialized_symbol() ||
+ obj == *isolate->factory()->premonomorphic_symbol()) {
+ return ObjectStats::FEEDBACK_VECTOR_SLOT_STORE_UNUSED_TYPE;
+ }
+ return ObjectStats::FEEDBACK_VECTOR_SLOT_STORE_TYPE;
+
+ case FeedbackSlotKind::kBinaryOp:
+ case FeedbackSlotKind::kCompareOp:
+ return ObjectStats::FEEDBACK_VECTOR_SLOT_ENUM_TYPE;
+
+ default:
+ return ObjectStats::FEEDBACK_VECTOR_SLOT_OTHER_TYPE;
+ }
+}
+
+void ObjectStatsCollectorImpl::RecordVirtualFeedbackVectorDetails(
+ FeedbackVector* vector) {
+ if (virtual_objects_.find(vector) == virtual_objects_.end()) {
+ // Manually insert the feedback vector into the virtual object list, since
+ // we're logging its component parts separately.
+ virtual_objects_.insert(vector);
+
+ size_t calculated_size = 0;
+
+ // Log the feedback vector's header (fixed fields).
+ size_t header_size =
+ reinterpret_cast<Address>(vector->slots_start()) - vector->address();
+ stats_->RecordVirtualObjectStats(ObjectStats::FEEDBACK_VECTOR_HEADER_TYPE,
+ header_size,
+ ObjectStats::kNoOverAllocation);
+ calculated_size += header_size;
+
+ // Iterate over the feedback slots and log each one.
+ FeedbackMetadataIterator it(vector->metadata());
+ while (it.HasNext()) {
+ FeedbackSlot slot = it.Next();
+ // Log the entry (or entries) taken up by this slot.
+ size_t slot_size = it.entry_size() * kPointerSize;
+ stats_->RecordVirtualObjectStats(
+ GetFeedbackSlotType(vector->Get(slot), it.kind(), heap_->isolate()),
+ slot_size, ObjectStats::kNoOverAllocation);
+ calculated_size += slot_size;
+
+ // Log the monomorphic/polymorphic helper objects that this slot owns.
+ for (int i = 0; i < it.entry_size(); i++) {
+ Object* raw_object = vector->get(slot.ToInt() + i);
+ if (!raw_object->IsHeapObject()) continue;
+ HeapObject* object = HeapObject::cast(raw_object);
+ if (object->IsCell() || object->IsFixedArray()) {
+ RecordSimpleVirtualObjectStats(
+ vector, object, ObjectStats::FEEDBACK_VECTOR_ENTRY_TYPE);
+ }
+ }
}
+
+ CHECK_EQ(calculated_size, vector->Size());
}
+}
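The CHECK_EQ at the end of RecordVirtualFeedbackVectorDetails asserts that the header bytes plus the per-slot bytes account for the whole vector. A rough standalone model of that accounting, with made-up sizes (kPointerSize = 8 and a three-slot layout) rather than real FeedbackVector numbers:

#include <cassert>
#include <cstddef>

int main() {
  const size_t kPointerSize = 8;                // assumption: 64-bit build
  const size_t header_size = 3 * kPointerSize;  // hypothetical fixed fields
  const size_t entry_sizes[] = {1, 2, 1};       // entries per slot (hypothetical)

  // Sum header plus each slot, just like the per-part logging above.
  size_t calculated = header_size;
  for (size_t entries : entry_sizes) calculated += entries * kPointerSize;

  // The object's total size must match what the parts summed up to,
  // mirroring CHECK_EQ(calculated_size, vector->Size()).
  const size_t vector_size = header_size + (1 + 2 + 1) * kPointerSize;
  assert(calculated == vector_size);
  return 0;
}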
- private:
- ObjectStatsCollectorImpl* parent_;
-};
+void ObjectStatsCollectorImpl::RecordVirtualFixedArrayDetails(
+ FixedArray* array) {
+ if (IsCowArray(array)) {
+ RecordVirtualObjectStats(nullptr, array, ObjectStats::COW_ARRAY_TYPE,
+ array->Size(), ObjectStats::kNoOverAllocation,
+ kIgnoreCow);
+ }
+}
+
+void ObjectStatsCollectorImpl::CollectStatistics(HeapObject* obj, Phase phase) {
+ Map* map = obj->map();
+ switch (phase) {
+ case kPhase1:
+ if (obj->IsFeedbackVector()) {
+ RecordVirtualFeedbackVectorDetails(FeedbackVector::cast(obj));
+ } else if (obj->IsMap()) {
+ RecordVirtualMapDetails(Map::cast(obj));
+ } else if (obj->IsBytecodeArray()) {
+ RecordVirtualBytecodeArrayDetails(BytecodeArray::cast(obj));
+ } else if (obj->IsCode()) {
+ RecordVirtualCodeDetails(Code::cast(obj));
+ } else if (obj->IsFunctionTemplateInfo()) {
+ RecordVirtualFunctionTemplateInfoDetails(
+ FunctionTemplateInfo::cast(obj));
+ } else if (obj->IsJSFunction()) {
+ RecordVirtualJSFunctionDetails(JSFunction::cast(obj));
+ } else if (obj->IsJSGlobalObject()) {
+ RecordVirtualJSGlobalObjectDetails(JSGlobalObject::cast(obj));
+ } else if (obj->IsJSObject()) {
+ // This phase needs to come after RecordVirtualAllocationSiteDetails
+ // to properly split among boilerplates.
+ RecordVirtualJSObjectDetails(JSObject::cast(obj));
+ } else if (obj->IsJSCollection()) {
+ RecordVirtualJSCollectionDetails(JSObject::cast(obj));
+ } else if (obj->IsSharedFunctionInfo()) {
+ RecordVirtualSharedFunctionInfoDetails(SharedFunctionInfo::cast(obj));
+ } else if (obj->IsContext()) {
+ RecordVirtualContext(Context::cast(obj));
+ } else if (obj->IsScript()) {
+ RecordVirtualScriptDetails(Script::cast(obj));
+ } else if (obj->IsFixedArray()) {
+ // Has to go last as it triggers too eagerly.
+ RecordVirtualFixedArrayDetails(FixedArray::cast(obj));
+ }
+ break;
+ case kPhase2:
+ RecordObjectStats(obj, map->instance_type(), obj->Size());
+ break;
+ }
+}
void ObjectStatsCollectorImpl::CollectGlobalStatistics() {
- // Global FixedArrays.
- RecordFixedArrayHelper(nullptr, heap_->weak_new_space_object_to_code_list(),
- WEAK_NEW_SPACE_OBJECT_TO_CODE_SUB_TYPE, 0);
- RecordFixedArrayHelper(nullptr, heap_->serialized_objects(),
- SERIALIZED_OBJECTS_SUB_TYPE, 0);
- RecordFixedArrayHelper(nullptr, heap_->number_string_cache(),
- NUMBER_STRING_CACHE_SUB_TYPE, 0);
- RecordFixedArrayHelper(nullptr, heap_->single_character_string_cache(),
- SINGLE_CHARACTER_STRING_CACHE_SUB_TYPE, 0);
- RecordFixedArrayHelper(nullptr, heap_->string_split_cache(),
- STRING_SPLIT_CACHE_SUB_TYPE, 0);
- RecordFixedArrayHelper(nullptr, heap_->regexp_multiple_cache(),
- REGEXP_MULTIPLE_CACHE_SUB_TYPE, 0);
- RecordFixedArrayHelper(nullptr, heap_->retained_maps(),
- RETAINED_MAPS_SUB_TYPE, 0);
-
- // Global weak FixedArrays.
- RecordFixedArrayHelper(
+ // Iterate boilerplates first to disambiguate them from regular JS objects.
+ Object* list = heap_->allocation_sites_list();
+ while (list->IsAllocationSite()) {
+ AllocationSite* site = AllocationSite::cast(list);
+ RecordVirtualAllocationSiteDetails(site);
+ list = site->weak_next();
+ }
+
+ // FixedArray.
+ RecordSimpleVirtualObjectStats(
+ nullptr, heap_->weak_new_space_object_to_code_list(),
+ ObjectStats::WEAK_NEW_SPACE_OBJECT_TO_CODE_TYPE);
+ RecordSimpleVirtualObjectStats(nullptr, heap_->serialized_objects(),
+ ObjectStats::SERIALIZED_OBJECTS_TYPE);
+ RecordSimpleVirtualObjectStats(nullptr, heap_->number_string_cache(),
+ ObjectStats::NUMBER_STRING_CACHE_TYPE);
+ RecordSimpleVirtualObjectStats(
+ nullptr, heap_->single_character_string_cache(),
+ ObjectStats::SINGLE_CHARACTER_STRING_CACHE_TYPE);
+ RecordSimpleVirtualObjectStats(nullptr, heap_->string_split_cache(),
+ ObjectStats::STRING_SPLIT_CACHE_TYPE);
+ RecordSimpleVirtualObjectStats(nullptr, heap_->regexp_multiple_cache(),
+ ObjectStats::REGEXP_MULTIPLE_CACHE_TYPE);
+ RecordSimpleVirtualObjectStats(nullptr, heap_->retained_maps(),
+ ObjectStats::RETAINED_MAPS_TYPE);
+
+ // WeakFixedArray.
+ RecordSimpleVirtualObjectStats(
nullptr, WeakFixedArray::cast(heap_->noscript_shared_function_infos()),
- NOSCRIPT_SHARED_FUNCTION_INFOS_SUB_TYPE, 0);
- RecordFixedArrayHelper(nullptr, WeakFixedArray::cast(heap_->script_list()),
- SCRIPT_LIST_SUB_TYPE, 0);
-
- // Global hash tables.
- RecordHashTableHelper(nullptr, heap_->string_table(), STRING_TABLE_SUB_TYPE);
- RecordHashTableHelper(nullptr, heap_->weak_object_to_code_table(),
- OBJECT_TO_CODE_SUB_TYPE);
- RecordHashTableHelper(nullptr, heap_->code_stubs(),
- CODE_STUBS_TABLE_SUB_TYPE);
- RecordHashTableHelper(nullptr, heap_->empty_property_dictionary(),
- EMPTY_PROPERTIES_DICTIONARY_SUB_TYPE);
- CompilationCache* compilation_cache = heap_->isolate()->compilation_cache();
- CompilationCacheTableVisitor v(this);
- compilation_cache->Iterate(&v);
+ ObjectStats::NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE);
+ RecordSimpleVirtualObjectStats(nullptr,
+ WeakFixedArray::cast(heap_->script_list()),
+ ObjectStats::SCRIPT_LIST_TYPE);
+
+ // HashTable.
+ RecordHashTableVirtualObjectStats(nullptr, heap_->string_table(),
+ ObjectStats::STRING_TABLE_TYPE);
+ RecordHashTableVirtualObjectStats(nullptr, heap_->code_stubs(),
+ ObjectStats::CODE_STUBS_TABLE_TYPE);
+
+ // WeakHashTable.
+ RecordHashTableVirtualObjectStats(nullptr, heap_->weak_object_to_code_table(),
+ ObjectStats::OBJECT_TO_CODE_TYPE);
}
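CollectGlobalStatistics now starts by walking the weakly linked allocation-site list so boilerplates are categorized before the generic JSObject pass sees them; the traversal is a plain follow-the-next-pointer loop. A self-contained sketch with a hypothetical node type standing in for AllocationSite:

#include <cstdio>

// Hypothetical stand-in for AllocationSite and its weak_next() link.
struct Site { int id; Site* weak_next; };

void VisitSites(Site* head) {
  // Mirrors: while (list->IsAllocationSite()) { ...; list = site->weak_next(); }
  for (Site* site = head; site != nullptr; site = site->weak_next) {
    printf("recording boilerplate details for site %d\n", site->id);
  }
}

int main() {
  Site c{3, nullptr}, b{2, &c}, a{1, &b};
  VisitSites(&a);
  return 0;
}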
void ObjectStatsCollectorImpl::RecordObjectStats(HeapObject* obj,
InstanceType type,
size_t size) {
- if (virtual_objects_.find(obj) == virtual_objects_.end())
+ if (virtual_objects_.find(obj) == virtual_objects_.end()) {
stats_->RecordObjectStats(type, size);
+ }
}
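RecordObjectStats only falls back to the raw InstanceType when the object was not already claimed by a virtual category in phase 1, with virtual_objects_ acting as a seen-set. A tiny sketch of that precedence rule using a plain std::unordered_set in place of the collector's bookkeeping:

#include <cstdio>
#include <unordered_set>

struct Stats { size_t virtual_bytes = 0; size_t regular_bytes = 0; };

void RecordVirtual(Stats* s, std::unordered_set<const void*>* seen,
                   const void* obj, size_t size) {
  seen->insert(obj);  // phase 1: the object is claimed by a virtual type
  s->virtual_bytes += size;
}

void RecordRegular(Stats* s, const std::unordered_set<const void*>& seen,
                   const void* obj, size_t size) {
  if (seen.count(obj)) return;  // phase 2: skip objects already claimed
  s->regular_bytes += size;
}

int main() {
  Stats stats;
  std::unordered_set<const void*> seen;
  int a = 0, b = 0;
  RecordVirtual(&stats, &seen, &a, 32);
  RecordRegular(&stats, seen, &a, 32);  // ignored: already counted virtually
  RecordRegular(&stats, seen, &b, 16);  // counted under its raw instance type
  printf("virtual=%zu regular=%zu\n", stats.virtual_bytes, stats.regular_bytes);
  return 0;
}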
-static bool CanRecordFixedArray(Heap* heap, FixedArrayBase* array) {
- return array->map()->instance_type() == FIXED_ARRAY_TYPE &&
- array != heap->empty_fixed_array() &&
- array != heap->empty_sloppy_arguments_elements() &&
- array != heap->empty_slow_element_dictionary() &&
- array != heap->empty_property_dictionary();
+bool ObjectStatsCollectorImpl::CanRecordFixedArray(FixedArrayBase* array) {
+ return array != heap_->empty_fixed_array() &&
+ array != heap_->empty_sloppy_arguments_elements() &&
+ array != heap_->empty_slow_element_dictionary() &&
+ array != heap_->empty_property_dictionary();
}
-static bool IsCowArray(Heap* heap, FixedArrayBase* array) {
- return array->map() == heap->fixed_cow_array_map();
+bool ObjectStatsCollectorImpl::IsCowArray(FixedArrayBase* array) {
+ return array->map() == heap_->fixed_cow_array_map();
}
bool ObjectStatsCollectorImpl::SameLiveness(HeapObject* obj1,
@@ -460,256 +613,226 @@ bool ObjectStatsCollectorImpl::SameLiveness(HeapObject* obj1,
marking_state_->Color(obj1) == marking_state_->Color(obj2);
}
-bool ObjectStatsCollectorImpl::RecordFixedArrayHelper(HeapObject* parent,
- FixedArray* array,
- int subtype,
- size_t overhead) {
- if (SameLiveness(parent, array) && CanRecordFixedArray(heap_, array) &&
- !IsCowArray(heap_, array)) {
- return stats_->RecordFixedArraySubTypeStats(array, subtype, array->Size(),
- overhead);
+void ObjectStatsCollectorImpl::RecordVirtualMapDetails(Map* map) {
+ // TODO(mlippautz): map->dependent_code(): DEPENDENT_CODE_TYPE.
+
+ DescriptorArray* array = map->instance_descriptors();
+ if (map->owns_descriptors() && array != heap_->empty_descriptor_array()) {
+ // DescriptorArray has its own instance type.
+ EnumCache* enum_cache = array->GetEnumCache();
+ RecordSimpleVirtualObjectStats(array, enum_cache->keys(),
+ ObjectStats::ENUM_CACHE_TYPE);
+ RecordSimpleVirtualObjectStats(array, enum_cache->indices(),
+ ObjectStats::ENUM_INDICES_CACHE_TYPE);
}
- return false;
-}
-void ObjectStatsCollectorImpl::RecursivelyRecordFixedArrayHelper(
- HeapObject* parent, FixedArray* array, int subtype) {
- if (RecordFixedArrayHelper(parent, array, subtype, 0)) {
- for (int i = 0; i < array->length(); i++) {
- if (array->get(i)->IsFixedArray()) {
- RecursivelyRecordFixedArrayHelper(
- parent, FixedArray::cast(array->get(i)), subtype);
+ if (map->is_prototype_map()) {
+ if (map->prototype_info()->IsPrototypeInfo()) {
+ PrototypeInfo* info = PrototypeInfo::cast(map->prototype_info());
+ Object* users = info->prototype_users();
+ if (users->IsWeakFixedArray()) {
+ RecordSimpleVirtualObjectStats(map, WeakFixedArray::cast(users),
+ ObjectStats::PROTOTYPE_USERS_TYPE);
}
}
}
}
-template <class HashTable>
-void ObjectStatsCollectorImpl::RecordHashTableHelper(HeapObject* parent,
- HashTable* array,
- int subtype) {
- int used = array->NumberOfElements() * HashTable::kEntrySize * kPointerSize;
- CHECK_GE(array->Size(), used);
- size_t overhead = array->Size() - used -
- HashTable::kElementsStartIndex * kPointerSize -
- FixedArray::kHeaderSize;
- RecordFixedArrayHelper(parent, array, subtype, overhead);
-}
-
-void ObjectStatsCollectorImpl::RecordJSObjectDetails(JSObject* object) {
- size_t overhead = 0;
- FixedArrayBase* elements = object->elements();
- if (CanRecordFixedArray(heap_, elements) && !IsCowArray(heap_, elements)) {
- if (elements->IsDictionary() && SameLiveness(object, elements)) {
- NumberDictionary* dict = NumberDictionary::cast(elements);
- RecordHashTableHelper(object, dict, DICTIONARY_ELEMENTS_SUB_TYPE);
- } else {
- if (IsHoleyElementsKind(object->GetElementsKind())) {
- int used = object->GetFastElementsUsage() * kPointerSize;
- if (object->GetElementsKind() == HOLEY_DOUBLE_ELEMENTS) used *= 2;
- CHECK_GE(elements->Size(), used);
- overhead = elements->Size() - used - FixedArray::kHeaderSize;
- }
- stats_->RecordFixedArraySubTypeStats(elements, PACKED_ELEMENTS_SUB_TYPE,
- elements->Size(), overhead);
+void ObjectStatsCollectorImpl::RecordVirtualScriptDetails(Script* script) {
+ FixedArray* infos = script->shared_function_infos();
+ RecordSimpleVirtualObjectStats(
+ script, script->shared_function_infos(),
+ ObjectStats::SCRIPT_SHARED_FUNCTION_INFOS_TYPE);
+ // Split off weak cells from the regular weak cell type.
+ for (int i = 0; i < infos->length(); i++) {
+ if (infos->get(i)->IsWeakCell()) {
+ RecordSimpleVirtualObjectStats(
+ infos, WeakCell::cast(infos->get(i)),
+ ObjectStats::SCRIPT_SHARED_FUNCTION_INFOS_TYPE);
}
}
- if (object->IsJSGlobalObject()) {
- GlobalDictionary* properties =
- JSGlobalObject::cast(object)->global_dictionary();
- if (CanRecordFixedArray(heap_, properties) &&
- SameLiveness(object, properties)) {
- RecordHashTableHelper(object, properties, DICTIONARY_PROPERTIES_SUB_TYPE);
- }
- } else if (!object->HasFastProperties()) {
- NameDictionary* properties = object->property_dictionary();
- if (CanRecordFixedArray(heap_, properties) &&
- SameLiveness(object, properties)) {
- RecordHashTableHelper(object, properties, DICTIONARY_PROPERTIES_SUB_TYPE);
- }
+ // Log the size of external source code.
+ Object* source = script->source();
+ if (source->IsExternalString()) {
+ // The contents of external strings aren't on the heap, so we have to record
+ // them manually.
+ ExternalString* external_source_string = ExternalString::cast(source);
+ size_t length_multiplier = external_source_string->IsTwoByteRepresentation()
+ ? kShortSize
+ : kCharSize;
+ size_t off_heap_size = external_source_string->length() * length_multiplier;
+ size_t on_heap_size = external_source_string->Size();
+ RecordVirtualObjectStats(script, external_source_string,
+ ObjectStats::SCRIPT_SOURCE_EXTERNAL_TYPE,
+ on_heap_size + off_heap_size,
+ ObjectStats::kNoOverAllocation);
+ } else if (source->IsHeapObject()) {
+ RecordSimpleVirtualObjectStats(
+ script, HeapObject::cast(source),
+ ObjectStats::SCRIPT_SOURCE_NON_EXTERNAL_TYPE);
}
}
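For externalized script sources the character payload lives off the V8 heap, so the recorded size is the small on-heap ExternalString plus length times character width: 1 byte for one-byte strings and 2 bytes for two-byte strings, which is what kCharSize and kShortSize evaluate to on common targets. A quick worked example under an assumed 16-byte on-heap object:

#include <cstddef>
#include <cstdio>

int main() {
  const size_t kCharSize = 1, kShortSize = 2;  // assumed widths, see above
  const size_t on_heap_size = 16;              // hypothetical ExternalString size
  const size_t length = 10000;                 // source length in characters
  const bool two_byte = true;

  size_t off_heap_size = length * (two_byte ? kShortSize : kCharSize);
  // SCRIPT_SOURCE_EXTERNAL_TYPE is charged with both parts combined.
  printf("recorded size = %zu bytes\n", on_heap_size + off_heap_size);
  return 0;
}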
-void ObjectStatsCollectorImpl::RecordJSWeakCollectionDetails(
- JSWeakCollection* obj) {
- if (obj->table()->IsHashTable()) {
- ObjectHashTable* table = ObjectHashTable::cast(obj->table());
- int used = table->NumberOfElements() * ObjectHashTable::kEntrySize;
- size_t overhead = table->Size() - used;
- RecordFixedArrayHelper(obj, table, JS_WEAK_COLLECTION_SUB_TYPE, overhead);
+void ObjectStatsCollectorImpl::RecordVirtualSharedFunctionInfoDetails(
+ SharedFunctionInfo* info) {
+ // Uncompiled SharedFunctionInfo gets its own category.
+ if (!info->is_compiled()) {
+ RecordSimpleVirtualObjectStats(
+ nullptr, info, ObjectStats::UNCOMPILED_SHARED_FUNCTION_INFO_TYPE);
}
+ // SharedFunctionInfo::feedback_metadata() is a COW array.
+ FeedbackMetadata* fm = FeedbackMetadata::cast(info->feedback_metadata());
+ RecordVirtualObjectStats(info, fm, ObjectStats::FEEDBACK_METADATA_TYPE,
+ fm->Size(), ObjectStats::kNoOverAllocation,
+ kIgnoreCow);
}
-void ObjectStatsCollectorImpl::RecordJSCollectionDetails(JSObject* obj) {
- // The JS versions use a different HashTable implementation that cannot use
- // the regular helper. Since overall impact is usually small just record
- // without overhead.
- if (obj->IsJSMap()) {
- RecordFixedArrayHelper(nullptr, FixedArray::cast(JSMap::cast(obj)->table()),
- JS_COLLECTION_SUB_TYPE, 0);
- }
- if (obj->IsJSSet()) {
- RecordFixedArrayHelper(nullptr, FixedArray::cast(JSSet::cast(obj)->table()),
- JS_COLLECTION_SUB_TYPE, 0);
+void ObjectStatsCollectorImpl::RecordVirtualJSFunctionDetails(
+ JSFunction* function) {
+ // Uncompiled JSFunctions get their own category.
+ if (!function->is_compiled()) {
+ RecordSimpleVirtualObjectStats(nullptr, function,
+ ObjectStats::UNCOMPILED_JS_FUNCTION_TYPE);
}
}
-void ObjectStatsCollectorImpl::RecordScriptDetails(Script* obj) {
- FixedArray* infos = FixedArray::cast(obj->shared_function_infos());
- RecordFixedArrayHelper(obj, infos, SHARED_FUNCTION_INFOS_SUB_TYPE, 0);
-}
+namespace {
-void ObjectStatsCollectorImpl::RecordMapDetails(Map* map_obj) {
- DescriptorArray* array = map_obj->instance_descriptors();
- if (map_obj->owns_descriptors() && array != heap_->empty_descriptor_array() &&
- SameLiveness(map_obj, array)) {
- RecordFixedArrayHelper(map_obj, array, DESCRIPTOR_ARRAY_SUB_TYPE, 0);
- EnumCache* enum_cache = array->GetEnumCache();
- RecordFixedArrayHelper(array, enum_cache->keys(), ENUM_CACHE_SUB_TYPE, 0);
- RecordFixedArrayHelper(array, enum_cache->indices(),
- ENUM_INDICES_CACHE_SUB_TYPE, 0);
- }
+bool MatchesConstantElementsPair(Object* object) {
+ if (!object->IsTuple2()) return false;
+ Tuple2* tuple = Tuple2::cast(object);
+ return tuple->value1()->IsSmi() && tuple->value2()->IsFixedArray();
+}
- for (DependentCode* cur_dependent_code = map_obj->dependent_code();
- cur_dependent_code != heap_->empty_fixed_array();
- cur_dependent_code = DependentCode::cast(
- cur_dependent_code->get(DependentCode::kNextLinkIndex))) {
- RecordFixedArrayHelper(map_obj, cur_dependent_code, DEPENDENT_CODE_SUB_TYPE,
- 0);
- }
+} // namespace
- if (map_obj->is_prototype_map()) {
- if (map_obj->prototype_info()->IsPrototypeInfo()) {
- PrototypeInfo* info = PrototypeInfo::cast(map_obj->prototype_info());
- Object* users = info->prototype_users();
- if (users->IsWeakFixedArray()) {
- RecordFixedArrayHelper(map_obj, WeakFixedArray::cast(users),
- PROTOTYPE_USERS_SUB_TYPE, 0);
+void ObjectStatsCollectorImpl::
+ RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
+ HeapObject* parent, HeapObject* object,
+ ObjectStats::VirtualInstanceType type) {
+ if (RecordSimpleVirtualObjectStats(parent, object, type)) {
+ if (object->IsFixedArray()) {
+ FixedArray* array = FixedArray::cast(object);
+ for (int i = 0; i < array->length(); i++) {
+ Object* entry = array->get(i);
+ if (!entry->IsHeapObject()) continue;
+ RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
+ array, HeapObject::cast(entry), type);
}
+ } else if (MatchesConstantElementsPair(object)) {
+ Tuple2* tuple = Tuple2::cast(object);
+ RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
+ tuple, HeapObject::cast(tuple->value2()), type);
}
}
}
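The helper above recurses through FixedArrays and through Tuple2 constant-elements pairs so nested literal data reachable from a constant pool is attributed to one virtual type, and the boolean result of RecordSimpleVirtualObjectStats cuts the walk off once an object has already been recorded. A simplified recursion over a hypothetical nested structure:

#include <cstdio>
#include <set>
#include <vector>

struct Node { std::vector<Node*> children; };

// Returns false if the node was seen before, mirroring how a failed
// RecordSimpleVirtualObjectStats call stops the recursive walk.
bool Record(std::set<const Node*>* seen, const Node* n, size_t* count) {
  if (!seen->insert(n).second) return false;
  ++*count;
  return true;
}

void RecordRecursively(std::set<const Node*>* seen, const Node* n,
                       size_t* count) {
  if (!Record(seen, n, count)) return;
  for (const Node* child : n->children) RecordRecursively(seen, child, count);
}

int main() {
  Node leaf, shared{{&leaf}}, root{{&shared, &shared}};
  std::set<const Node*> seen;
  size_t count = 0;
  RecordRecursively(&seen, &root, &count);
  printf("%zu objects recorded\n", count);  // the shared subtree is counted once
  return 0;
}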
-void ObjectStatsCollectorImpl::RecordTemplateInfoDetails(TemplateInfo* obj) {
- if (obj->property_accessors()->IsFixedArray()) {
- RecordFixedArrayHelper(obj, FixedArray::cast(obj->property_accessors()),
- TEMPLATE_INFO_SUB_TYPE, 0);
- }
- if (obj->property_list()->IsFixedArray()) {
- RecordFixedArrayHelper(obj, FixedArray::cast(obj->property_list()),
- TEMPLATE_INFO_SUB_TYPE, 0);
+void ObjectStatsCollectorImpl::RecordVirtualBytecodeArrayDetails(
+ BytecodeArray* bytecode) {
+ RecordSimpleVirtualObjectStats(
+ bytecode, bytecode->constant_pool(),
+ ObjectStats::BYTECODE_ARRAY_CONSTANT_POOL_TYPE);
+ // FixedArrays in the constant pool are used for holding descriptor information.
+ // They are shared with optimized code.
+ FixedArray* constant_pool = FixedArray::cast(bytecode->constant_pool());
+ for (int i = 0; i < constant_pool->length(); i++) {
+ Object* entry = constant_pool->get(i);
+ if (entry->IsFixedArray() || MatchesConstantElementsPair(entry)) {
+ RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
+ constant_pool, HeapObject::cast(entry),
+ ObjectStats::EMBEDDED_OBJECT_TYPE);
+ }
}
+ RecordSimpleVirtualObjectStats(
+ bytecode, bytecode->handler_table(),
+ ObjectStats::BYTECODE_ARRAY_HANDLER_TABLE_TYPE);
}
-void ObjectStatsCollectorImpl::RecordBytecodeArrayDetails(BytecodeArray* obj) {
- RecordFixedArrayHelper(obj, obj->constant_pool(),
- BYTECODE_ARRAY_CONSTANT_POOL_SUB_TYPE, 0);
- RecordFixedArrayHelper(obj, obj->handler_table(),
- BYTECODE_ARRAY_HANDLER_TABLE_SUB_TYPE, 0);
+namespace {
+
+ObjectStats::VirtualInstanceType CodeKindToVirtualInstanceType(
+ Code::Kind kind) {
+ switch (kind) {
+#define CODE_KIND_CASE(type) \
+ case Code::type: \
+ return ObjectStats::type;
+ CODE_KIND_LIST(CODE_KIND_CASE)
+#undef CODE_KIND_CASE
+ default:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
}
-void ObjectStatsCollectorImpl::RecordCodeDetails(Code* code) {
- stats_->RecordCodeSubTypeStats(code->kind(), code->Size());
- RecordFixedArrayHelper(code, code->deoptimization_data(),
- DEOPTIMIZATION_DATA_SUB_TYPE, 0);
+} // namespace
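CodeKindToVirtualInstanceType relies on the X-macro pattern: CODE_KIND_LIST(V) expands to one case per code kind, and because VIRTUAL_INSTANCE_TYPE_LIST in object-stats.h now also begins with CODE_KIND_LIST(V), the virtual enum carries an identically named constant for every kind. A generic illustration of the pattern with a made-up list macro:

#include <cstdio>

// Hypothetical list macro; V8's CODE_KIND_LIST works the same way.
#define MY_KIND_LIST(V) V(BUILTIN) V(BYTECODE_HANDLER) V(OPTIMIZED_FUNCTION)

enum Kind {
#define DEFINE_KIND(name) k##name,
  MY_KIND_LIST(DEFINE_KIND)
#undef DEFINE_KIND
};

const char* KindToString(Kind kind) {
  switch (kind) {
#define KIND_CASE(name) case k##name: return #name;
    MY_KIND_LIST(KIND_CASE)
#undef KIND_CASE
  }
  return "unknown";
}

int main() {
  printf("%s\n", KindToString(kOPTIMIZED_FUNCTION));
  return 0;
}

Keeping both enums generated from the same list is what lets the switch stay a mechanical one-liner per kind instead of a hand-maintained mapping.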
+
+void ObjectStatsCollectorImpl::RecordVirtualCodeDetails(Code* code) {
+ RecordSimpleVirtualObjectStats(nullptr, code,
+ CodeKindToVirtualInstanceType(code->kind()));
+ RecordSimpleVirtualObjectStats(code, code->deoptimization_data(),
+ ObjectStats::DEOPTIMIZATION_DATA_TYPE);
if (code->kind() == Code::Kind::OPTIMIZED_FUNCTION) {
DeoptimizationData* input_data =
DeoptimizationData::cast(code->deoptimization_data());
if (input_data->length() > 0) {
- RecordFixedArrayHelper(code->deoptimization_data(),
- input_data->LiteralArray(),
- OPTIMIZED_CODE_LITERALS_SUB_TYPE, 0);
+ RecordSimpleVirtualObjectStats(code->deoptimization_data(),
+ input_data->LiteralArray(),
+ ObjectStats::OPTIMIZED_CODE_LITERALS_TYPE);
}
}
- RecordFixedArrayHelper(code, code->handler_table(), HANDLER_TABLE_SUB_TYPE,
- 0);
int const mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
Object* target = it.rinfo()->target_object();
- if (target->IsFixedArray()) {
- RecursivelyRecordFixedArrayHelper(code, FixedArray::cast(target),
- EMBEDDED_OBJECT_SUB_TYPE);
+ if (target->IsFixedArray() || MatchesConstantElementsPair(target)) {
+ RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
+ code, HeapObject::cast(target), ObjectStats::EMBEDDED_OBJECT_TYPE);
}
}
}
}
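The embedded-object scan builds a bit mask for the single RelocInfo mode it cares about and lets the iterator skip everything else; the mask is plain 1 << mode membership testing. A standalone sketch with hypothetical relocation modes:

#include <cstdio>
#include <vector>

enum Mode { kCodeTarget = 0, kEmbeddedObject = 1, kExternalRef = 2 };

constexpr int ModeMask(Mode mode) { return 1 << mode; }

int main() {
  const int mode_mask = ModeMask(kEmbeddedObject);
  const std::vector<Mode> reloc_entries = {kCodeTarget, kEmbeddedObject,
                                           kExternalRef, kEmbeddedObject};
  int visited = 0;
  for (Mode mode : reloc_entries) {
    if ((ModeMask(mode) & mode_mask) == 0) continue;  // the iterator skips it
    ++visited;  // here the collector would record the embedded object
  }
  printf("visited %d embedded objects\n", visited);
  return 0;
}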
-void ObjectStatsCollectorImpl::RecordSharedFunctionInfoDetails(
- SharedFunctionInfo* sfi) {
- FixedArray* scope_info = sfi->scope_info();
- RecordFixedArrayHelper(sfi, scope_info, SCOPE_INFO_SUB_TYPE, 0);
- FeedbackMetadata* feedback_metadata = sfi->feedback_metadata();
- if (!feedback_metadata->is_empty()) {
- RecordFixedArrayHelper(sfi, feedback_metadata, FEEDBACK_METADATA_SUB_TYPE,
- 0);
- }
-}
-
-void ObjectStatsCollectorImpl::RecordFixedArrayDetails(FixedArray* array) {
- if (array->IsContext()) {
- RecordFixedArrayHelper(nullptr, array, CONTEXT_SUB_TYPE, 0);
- }
- if (IsCowArray(heap_, array) && CanRecordFixedArray(heap_, array)) {
- stats_->RecordFixedArraySubTypeStats(array, COPY_ON_WRITE_SUB_TYPE,
- array->Size(), 0);
- }
- if (array->IsNativeContext()) {
- Context* native_ctx = Context::cast(array);
- RecordHashTableHelper(array,
- native_ctx->slow_template_instantiations_cache(),
- SLOW_TEMPLATE_INSTANTIATIONS_CACHE_SUB_TYPE);
- FixedArray* fast_cache = native_ctx->fast_template_instantiations_cache();
- stats_->RecordFixedArraySubTypeStats(
- fast_cache, FAST_TEMPLATE_INSTANTIATIONS_CACHE_SUB_TYPE,
- fast_cache->Size(), 0);
+void ObjectStatsCollectorImpl::RecordVirtualContext(Context* context) {
+ if (context->IsNativeContext()) {
+ RecordSimpleVirtualObjectStats(nullptr, context,
+ ObjectStats::NATIVE_CONTEXT_TYPE);
+ } else if (context->IsFunctionContext()) {
+ RecordSimpleVirtualObjectStats(nullptr, context,
+ ObjectStats::FUNCTION_CONTEXT_TYPE);
+ } else {
+ RecordSimpleVirtualObjectStats(nullptr, context,
+ ObjectStats::OTHER_CONTEXT_TYPE);
}
}
class ObjectStatsVisitor {
public:
- enum CollectionMode {
- kRegular,
- kVirtual,
- };
-
ObjectStatsVisitor(Heap* heap, ObjectStatsCollectorImpl* live_collector,
ObjectStatsCollectorImpl* dead_collector,
- CollectionMode mode)
+ ObjectStatsCollectorImpl::Phase phase)
: live_collector_(live_collector),
dead_collector_(dead_collector),
marking_state_(
heap->mark_compact_collector()->non_atomic_marking_state()),
- mode_(mode) {}
+ phase_(phase) {}
bool Visit(HeapObject* obj, int size) {
if (marking_state_->IsBlack(obj)) {
- Collect(live_collector_, obj);
+ live_collector_->CollectStatistics(obj, phase_);
} else {
DCHECK(!marking_state_->IsGrey(obj));
- Collect(dead_collector_, obj);
+ dead_collector_->CollectStatistics(obj, phase_);
}
return true;
}
private:
- void Collect(ObjectStatsCollectorImpl* collector, HeapObject* obj) {
- switch (mode_) {
- case kRegular:
- collector->CollectStatistics(obj);
- break;
- case kVirtual:
- collector->CollectVirtualStatistics(obj);
- break;
- }
- }
-
ObjectStatsCollectorImpl* live_collector_;
ObjectStatsCollectorImpl* dead_collector_;
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
- CollectionMode mode_;
+ ObjectStatsCollectorImpl::Phase phase_;
};
namespace {
@@ -731,19 +854,10 @@ void IterateHeap(Heap* heap, ObjectStatsVisitor* visitor) {
void ObjectStatsCollector::Collect() {
ObjectStatsCollectorImpl live_collector(heap_, live_);
ObjectStatsCollectorImpl dead_collector(heap_, dead_);
- // 1. Collect system type otherwise indistinguishable from other types.
- {
- ObjectStatsVisitor visitor(heap_, &live_collector, &dead_collector,
- ObjectStatsVisitor::kVirtual);
- IterateHeap(heap_, &visitor);
- }
-
- // 2. Collect globals; only applies to live objects.
live_collector.CollectGlobalStatistics();
- // 3. Collect rest.
- {
+ for (int i = 0; i < ObjectStatsCollectorImpl::kNumberOfPhases; i++) {
ObjectStatsVisitor visitor(heap_, &live_collector, &dead_collector,
- ObjectStatsVisitor::kRegular);
+ static_cast<ObjectStatsCollectorImpl::Phase>(i));
IterateHeap(heap_, &visitor);
}
}
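Collect now makes one full heap pass per phase, and within each pass the visitor routes black (marked live) objects to the live collector and everything else to the dead collector. A schematic sketch of that dispatch over a hypothetical object list:

#include <cstdio>
#include <vector>

struct Obj { bool black; size_t size; };

struct Collector {
  size_t bytes = 0;
  void CollectStatistics(const Obj& obj, int /*phase*/) { bytes += obj.size; }
};

int main() {
  std::vector<Obj> heap = {{true, 64}, {false, 32}, {true, 128}};
  Collector live, dead;
  const int kNumberOfPhases = 2;  // matches kPhase1 / kPhase2 above
  for (int phase = 0; phase < kNumberOfPhases; phase++) {
    for (const Obj& obj : heap) {
      // Black objects are live; everything else is attributed to the dead set.
      (obj.black ? live : dead).CollectStatistics(obj, phase);
    }
  }
  printf("live=%zu dead=%zu\n", live.bytes, dead.bytes);
  return 0;
}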
diff --git a/deps/v8/src/heap/object-stats.h b/deps/v8/src/heap/object-stats.h
index 500ce36bd9..723ae53fd5 100644
--- a/deps/v8/src/heap/object-stats.h
+++ b/deps/v8/src/heap/object-stats.h
@@ -5,13 +5,8 @@
#ifndef V8_HEAP_OBJECT_STATS_H_
#define V8_HEAP_OBJECT_STATS_H_
-#include <set>
-
-#include "src/base/ieee754.h"
-#include "src/heap/heap.h"
-#include "src/heap/mark-compact.h"
-#include "src/heap/objects-visiting.h"
#include "src/objects.h"
+#include "src/objects/code.h"
// These instance types do not exist for actual use but are merely introduced
// for object stats tracing. In contrast to Code and FixedArray sub types
@@ -19,18 +14,71 @@
// tracing.
//
// Update LAST_VIRTUAL_TYPE below when changing this macro.
-#define VIRTUAL_INSTANCE_TYPE_LIST(V) \
- V(BOILERPLATE_ELEMENTS_TYPE) \
- V(BOILERPLATE_NAME_DICTIONARY_TYPE) \
- V(BOILERPLATE_PROPERTY_ARRAY_TYPE) \
- V(JS_ARRAY_BOILERPLATE_TYPE) \
- V(JS_OBJECT_BOILERPLATE_TYPE)
+#define VIRTUAL_INSTANCE_TYPE_LIST(V) \
+ CODE_KIND_LIST(V) \
+ V(BOILERPLATE_ELEMENTS_TYPE) \
+ V(BOILERPLATE_PROPERTY_ARRAY_TYPE) \
+ V(BOILERPLATE_PROPERTY_DICTIONARY_TYPE) \
+ V(BYTECODE_ARRAY_CONSTANT_POOL_TYPE) \
+ V(BYTECODE_ARRAY_HANDLER_TABLE_TYPE) \
+ V(CODE_STUBS_TABLE_TYPE) \
+ V(COW_ARRAY_TYPE) \
+ V(DEOPTIMIZATION_DATA_TYPE) \
+ V(DEPENDENT_CODE_TYPE) \
+ V(ELEMENTS_TYPE) \
+ V(EMBEDDED_OBJECT_TYPE) \
+ V(ENUM_CACHE_TYPE) \
+ V(ENUM_INDICES_CACHE_TYPE) \
+ V(FEEDBACK_METADATA_TYPE) \
+ V(FEEDBACK_VECTOR_ENTRY_TYPE) \
+ V(FEEDBACK_VECTOR_HEADER_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_CALL_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_CALL_UNUSED_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_ENUM_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_LOAD_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_LOAD_UNUSED_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_OTHER_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_STORE_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_STORE_UNUSED_TYPE) \
+ V(FUNCTION_CONTEXT_TYPE) \
+ V(FUNCTION_TEMPLATE_INFO_ENTRIES_TYPE) \
+ V(GLOBAL_ELEMENTS_TYPE) \
+ V(GLOBAL_PROPERTIES_TYPE) \
+ V(JS_ARRAY_BOILERPLATE_TYPE) \
+ V(JS_COLLETION_TABLE_TYPE) \
+ V(JS_OBJECT_BOILERPLATE_TYPE) \
+ V(NATIVE_CONTEXT_TYPE) \
+ V(NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE) \
+ V(NUMBER_STRING_CACHE_TYPE) \
+ V(OBJECT_PROPERTY_DICTIONARY_TYPE) \
+ V(OBJECT_TO_CODE_TYPE) \
+ V(OPTIMIZED_CODE_LITERALS_TYPE) \
+ V(OTHER_CONTEXT_TYPE) \
+ V(PROTOTYPE_USERS_TYPE) \
+ V(REGEXP_MULTIPLE_CACHE_TYPE) \
+ V(RETAINED_MAPS_TYPE) \
+ V(SCRIPT_LIST_TYPE) \
+ V(SCRIPT_SHARED_FUNCTION_INFOS_TYPE) \
+ V(SCRIPT_SOURCE_EXTERNAL_TYPE) \
+ V(SCRIPT_SOURCE_NON_EXTERNAL_TYPE) \
+ V(SERIALIZED_OBJECTS_TYPE) \
+ V(SINGLE_CHARACTER_STRING_CACHE_TYPE) \
+ V(STRING_SPLIT_CACHE_TYPE) \
+ V(STRING_TABLE_TYPE) \
+ V(UNCOMPILED_JS_FUNCTION_TYPE) \
+ V(UNCOMPILED_SHARED_FUNCTION_INFO_TYPE) \
+ V(WEAK_NEW_SPACE_OBJECT_TO_CODE_TYPE)
namespace v8 {
namespace internal {
+class Heap;
+class Isolate;
+
class ObjectStats {
public:
+ static const size_t kNoOverAllocation = 0;
+
explicit ObjectStats(Heap* heap) : heap_(heap) { ClearObjectStats(); }
// See description on VIRTUAL_INSTANCE_TYPE_LIST.
@@ -38,18 +86,14 @@ class ObjectStats {
#define DEFINE_VIRTUAL_INSTANCE_TYPE(type) type,
VIRTUAL_INSTANCE_TYPE_LIST(DEFINE_VIRTUAL_INSTANCE_TYPE)
#undef DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE
- LAST_VIRTUAL_TYPE = JS_OBJECT_BOILERPLATE_TYPE,
+ LAST_VIRTUAL_TYPE = WEAK_NEW_SPACE_OBJECT_TO_CODE_TYPE,
};
// ObjectStats are kept in two arrays, counts and sizes. Related stats are
// stored in a contiguous linear buffer. Stats groups are stored one after
// another.
enum {
- FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
- FIRST_FIXED_ARRAY_SUB_TYPE =
- FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS,
- FIRST_VIRTUAL_TYPE =
- FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1,
+ FIRST_VIRTUAL_TYPE = LAST_TYPE + 1,
OBJECT_STATS_COUNT = FIRST_VIRTUAL_TYPE + LAST_VIRTUAL_TYPE + 1,
};
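With the code-kind and fixed-array sub-type ranges removed, the stats arrays are laid out as all raw InstanceTypes followed by all virtual types, so a virtual type's bucket is simply FIRST_VIRTUAL_TYPE + type. A small sketch of the flat layout using made-up counts instead of V8's real enum sizes:

#include <cstddef>
#include <cstdio>

int main() {
  const int LAST_TYPE = 9;          // hypothetical: 10 raw instance types
  const int LAST_VIRTUAL_TYPE = 4;  // hypothetical: 5 virtual types
  const int FIRST_VIRTUAL_TYPE = LAST_TYPE + 1;
  const int OBJECT_STATS_COUNT = FIRST_VIRTUAL_TYPE + LAST_VIRTUAL_TYPE + 1;

  size_t counts[OBJECT_STATS_COUNT] = {};
  counts[3]++;                        // bucket for a raw InstanceType
  counts[FIRST_VIRTUAL_TYPE + 2]++;   // bucket for a VirtualInstanceType
  printf("total buckets: %d\n", OBJECT_STATS_COUNT);
  return 0;
}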
@@ -60,10 +104,8 @@ class ObjectStats {
void CheckpointObjectStats();
void RecordObjectStats(InstanceType type, size_t size);
- void RecordVirtualObjectStats(VirtualInstanceType type, size_t size);
- void RecordCodeSubTypeStats(int code_sub_type, size_t size);
- bool RecordFixedArraySubTypeStats(FixedArrayBase* array, int array_sub_type,
- size_t size, size_t over_allocated);
+ void RecordVirtualObjectStats(VirtualInstanceType type, size_t size,
+ size_t over_allocated);
size_t object_count_last_gc(size_t index) {
return object_counts_last_time_[index];
@@ -105,8 +147,6 @@ class ObjectStats {
// Detailed histograms by InstanceType.
size_t size_histogram_[OBJECT_STATS_COUNT][kNumberOfBuckets];
size_t over_allocated_histogram_[OBJECT_STATS_COUNT][kNumberOfBuckets];
-
- std::set<FixedArrayBase*> visited_fixed_array_sub_types_;
};
class ObjectStatsCollector {
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index 0a8c866979..8384cead02 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_OBJECTS_VISITING_INL_H_
-#define V8_OBJECTS_VISITING_INL_H_
+#ifndef V8_HEAP_OBJECTS_VISITING_INL_H_
+#define V8_HEAP_OBJECTS_VISITING_INL_H_
#include "src/heap/objects-visiting.h"
@@ -189,4 +189,4 @@ int NewSpaceVisitor<ConcreteVisitor>::VisitJSApiObject(Map* map,
} // namespace internal
} // namespace v8
-#endif // V8_OBJECTS_VISITING_INL_H_
+#endif // V8_HEAP_OBJECTS_VISITING_INL_H_
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index c20434a283..7746c91c71 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_OBJECTS_VISITING_H_
-#define V8_OBJECTS_VISITING_H_
+#ifndef V8_HEAP_OBJECTS_VISITING_H_
+#define V8_HEAP_OBJECTS_VISITING_H_
#include "src/allocation.h"
#include "src/layout-descriptor.h"
@@ -31,6 +31,7 @@ class JSWeakCollection;
V(Code) \
V(CodeDataContainer) \
V(ConsString) \
+ V(FeedbackCell) \
V(FeedbackVector) \
V(FixedArray) \
V(FixedDoubleArray) \
@@ -132,4 +133,4 @@ Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer);
} // namespace internal
} // namespace v8
-#endif // V8_OBJECTS_VISITING_H_
+#endif // V8_HEAP_OBJECTS_VISITING_H_
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index cd9c45141d..4e0f259c00 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_REMEMBERED_SET_H
-#define V8_REMEMBERED_SET_H
+#ifndef V8_HEAP_REMEMBERED_SET_H_
+#define V8_HEAP_REMEMBERED_SET_H_
#include "src/assembler.h"
#include "src/heap/heap.h"
@@ -298,8 +298,7 @@ class UpdateTypedSlotHelper {
Object* new_target = old_target;
SlotCallbackResult result = callback(&new_target);
if (new_target != old_target) {
- rinfo->set_target_address(old_target->GetIsolate(),
- Code::cast(new_target)->instruction_start());
+ rinfo->set_target_address(Code::cast(new_target)->instruction_start());
}
return result;
}
@@ -359,4 +358,4 @@ inline SlotType SlotTypeForRelocInfoMode(RelocInfo::Mode rmode) {
} // namespace internal
} // namespace v8
-#endif // V8_REMEMBERED_SET_H
+#endif // V8_HEAP_REMEMBERED_SET_H_
diff --git a/deps/v8/src/heap/scavenge-job.h b/deps/v8/src/heap/scavenge-job.h
index e84659c6d4..34f7bfafc3 100644
--- a/deps/v8/src/heap/scavenge-job.h
+++ b/deps/v8/src/heap/scavenge-job.h
@@ -64,7 +64,7 @@ class V8_EXPORT_PRIVATE ScavengeJob {
static const int kAverageIdleTimeMs = 5;
// The number of bytes to be allocated in new space before the next idle
// task is posted.
- static const size_t kBytesAllocatedBeforeNextIdleTask = 512 * KB;
+ static const size_t kBytesAllocatedBeforeNextIdleTask = 1024 * KB;
// The minimum size of allocated new space objects to trigger a scavenge.
static const size_t kMinAllocationLimit = 512 * KB;
// The allocation limit cannot exceed this fraction of the new space capacity.
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index b61872074e..2971db98cc 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -71,7 +71,7 @@ bool Scavenger::MigrateObject(Map* map, HeapObject* source, HeapObject* target,
bool Scavenger::SemiSpaceCopyObject(Map* map, HeapObject** slot,
HeapObject* object, int object_size) {
DCHECK(heap()->AllowedToBeMigrated(object, NEW_SPACE));
- AllocationAlignment alignment = object->RequiredAlignment();
+ AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
AllocationResult allocation =
allocator_.Allocate(NEW_SPACE, object_size, alignment);
@@ -97,7 +97,7 @@ bool Scavenger::SemiSpaceCopyObject(Map* map, HeapObject** slot,
bool Scavenger::PromoteObject(Map* map, HeapObject** slot, HeapObject* object,
int object_size) {
- AllocationAlignment alignment = object->RequiredAlignment();
+ AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
AllocationResult allocation =
allocator_.Allocate(OLD_SPACE, object_size, alignment);
@@ -228,9 +228,8 @@ void Scavenger::ScavengeObject(HeapObject** p, HeapObject* object) {
// If the first word is a forwarding address, the object has already been
// copied.
if (first_word.IsForwardingAddress()) {
- HeapObject* dest = first_word.ToForwardingAddress();
- DCHECK(object->GetIsolate()->heap()->InFromSpace(*p));
- base::AsAtomicPointer::Relaxed_Store(p, dest);
+ DCHECK(heap()->InFromSpace(*p));
+ *p = first_word.ToForwardingAddress();
return;
}
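The scavenger's fast path checks whether the object's first word is already a forwarding address; if so, the object was evacuated earlier in the cycle and the slot is simply redirected to the copy. A toy model of forwarding-pointer redirection in a copying collector, with a hypothetical object layout:

#include <cassert>

struct Obj {
  Obj* forwarding = nullptr;  // non-null once the object has been copied
  int payload = 0;
};

// If the object was already evacuated, rewrite the slot to point at the copy;
// otherwise copy it and install the forwarding pointer.
void ScavengeSlot(Obj** slot, Obj* to_space_copy) {
  Obj* object = *slot;
  if (object->forwarding != nullptr) {
    *slot = object->forwarding;
    return;
  }
  to_space_copy->payload = object->payload;
  object->forwarding = to_space_copy;
  *slot = to_space_copy;
}

int main() {
  Obj from, to;
  from.payload = 42;
  Obj* slot_a = &from;
  Obj* slot_b = &from;
  ScavengeSlot(&slot_a, &to);  // copies and installs the forwarding pointer
  ScavengeSlot(&slot_b, &to);  // fast path: already forwarded, redirect only
  assert(slot_a == &to && slot_b == &to && to.payload == 42);
  return 0;
}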
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index be5fb87a90..3baba9521b 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -97,6 +97,7 @@ void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
}
void Scavenger::ScavengePage(MemoryChunk* page) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Scavenger::ScavengePage");
CodePageMemoryModificationScope memory_modification_scope(page);
RememberedSet<OLD_TO_NEW>::Iterate(
page,
@@ -115,6 +116,7 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
}
void Scavenger::Process(OneshotBarrier* barrier) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Scavenger::Process");
// Threshold when to switch processing the promotion list to avoid
// allocating too much backing store in the worklist.
const int kProcessPromotionListThreshold = kPromotionListSegmentSize / 2;
@@ -160,12 +162,13 @@ void Scavenger::Finalize() {
allocator_.Finalize();
}
-void RootScavengeVisitor::VisitRootPointer(Root root, Object** p) {
+void RootScavengeVisitor::VisitRootPointer(Root root, const char* description,
+ Object** p) {
ScavengePointer(p);
}
-void RootScavengeVisitor::VisitRootPointers(Root root, Object** start,
- Object** end) {
+void RootScavengeVisitor::VisitRootPointers(Root root, const char* description,
+ Object** start, Object** end) {
// Copy all HeapObject pointers in [start, end)
for (Object** p = start; p < end; p++) ScavengePointer(p);
}
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index 27ae2e8ab7..e0008ae694 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -117,8 +117,9 @@ class RootScavengeVisitor final : public RootVisitor {
RootScavengeVisitor(Heap* heap, Scavenger* scavenger)
: heap_(heap), scavenger_(scavenger) {}
- void VisitRootPointer(Root root, Object** p) final;
- void VisitRootPointers(Root root, Object** start, Object** end) final;
+ void VisitRootPointer(Root root, const char* description, Object** p) final;
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) final;
private:
void ScavengePointer(Object** p);
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index 9e2d7e6354..8a7aca1694 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -222,7 +222,7 @@ bool Heap::CreateInitialMaps() {
(constructor_function_index)); \
}
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
+ ALLOCATE_VARSIZE_MAP(SCOPE_INFO_TYPE, scope_info)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_info)
ALLOCATE_VARSIZE_MAP(FEEDBACK_VECTOR_TYPE, feedback_vector)
ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
@@ -289,12 +289,17 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
ALLOCATE_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell)
- ALLOCATE_MAP(CELL_TYPE, Cell::kSize, no_closures_cell)
- ALLOCATE_MAP(CELL_TYPE, Cell::kSize, one_closure_cell)
- ALLOCATE_MAP(CELL_TYPE, Cell::kSize, many_closures_cell)
ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
+ // The "no closures" and "one closure" FeedbackCell maps need
+ // to be marked unstable because their objects can change maps.
+ ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, no_closures_cell)
+ no_closures_cell_map()->mark_unstable();
+ ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, one_closure_cell)
+ one_closure_cell_map()->mark_unstable();
+ ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, many_closures_cell)
+
ALLOCATE_VARSIZE_MAP(TRANSITION_ARRAY_TYPE, transition_array)
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, hash_table)
@@ -303,6 +308,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, name_dictionary)
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, global_dictionary)
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, number_dictionary)
+ ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, simple_number_dictionary)
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, string_table)
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, weak_hash_table)
@@ -475,7 +481,7 @@ void Heap::CreateInitialObjects() {
// Create the code_stubs dictionary. The initial size is set to avoid
// expanding the dictionary during bootstrapping.
- set_code_stubs(*NumberDictionary::New(isolate(), 128));
+ set_code_stubs(*SimpleNumberDictionary::New(isolate(), 128));
{
HandleScope scope(isolate());
@@ -533,7 +539,10 @@ void Heap::CreateInitialObjects() {
set_regexp_multiple_cache(*factory->NewFixedArray(
RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
- set_undefined_cell(*factory->NewCell(factory->undefined_value()));
+ // Allocate FeedbackCell for builtins.
+ Handle<FeedbackCell> many_closures_cell =
+ factory->NewManyClosuresCell(factory->undefined_value());
+ set_many_closures_cell(*many_closures_cell);
// Microtask queue uses the empty fixed array as a sentinel for "empty".
// Number of queued microtasks stored in Isolate::pending_microtask_count().
@@ -638,6 +647,14 @@ void Heap::CreateInitialObjects() {
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_array_buffer_neutering_protector(*cell);
+ cell = factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ set_promise_hook_protector(*cell);
+
+ cell = factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ set_promise_then_protector(*cell);
+
set_serialized_objects(empty_fixed_array());
set_serialized_global_proxy_sizes(empty_fixed_array());
@@ -650,6 +667,9 @@ void Heap::CreateInitialObjects() {
set_deserialize_lazy_handler_wide(Smi::kZero);
set_deserialize_lazy_handler_extra_wide(Smi::kZero);
+ // Initialize builtins constants table.
+ set_builtins_constants_table(empty_fixed_array());
+
// Initialize context slot cache.
isolate_->context_slot_cache()->Clear();
diff --git a/deps/v8/src/heap/slot-set.h b/deps/v8/src/heap/slot-set.h
index f1edb6f2fb..7423665bcb 100644
--- a/deps/v8/src/heap/slot-set.h
+++ b/deps/v8/src/heap/slot-set.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SLOT_SET_H
-#define V8_SLOT_SET_H
+#ifndef V8_HEAP_SLOT_SET_H_
+#define V8_HEAP_SLOT_SET_H_
#include <map>
#include <stack>
@@ -641,4 +641,4 @@ class TypedSlotSet {
} // namespace internal
} // namespace v8
-#endif // V8_SLOT_SET_H
+#endif // V8_HEAP_SLOT_SET_H_
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 39a62327df..498c34bd54 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -5,6 +5,7 @@
#ifndef V8_HEAP_SPACES_INL_H_
#define V8_HEAP_SPACES_INL_H_
+#include "src/base/v8-fallthrough.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/spaces.h"
#include "src/msan.h"
@@ -137,12 +138,6 @@ bool NewSpace::FromSpaceContainsSlow(Address a) {
bool NewSpace::ToSpaceContains(Object* o) { return to_space_.Contains(o); }
bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }
-void MemoryChunk::InitializeFreeListCategories() {
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- categories_[i].Initialize(static_cast<FreeListCategoryType>(i));
- }
-}
-
bool PagedSpace::Contains(Address addr) {
if (heap_->lo_space()->FindPage(addr)) return false;
return MemoryChunk::FromAnyPointerAddress(heap(), addr)->owner() == this;
@@ -157,6 +152,7 @@ void PagedSpace::UnlinkFreeListCategories(Page* page) {
DCHECK_EQ(this, page->owner());
page->ForAllFreeListCategories([this](FreeListCategory* category) {
DCHECK_EQ(free_list(), category->owner());
+ category->set_free_list(nullptr);
free_list()->RemoveCategory(category);
});
}
@@ -164,7 +160,8 @@ void PagedSpace::UnlinkFreeListCategories(Page* page) {
size_t PagedSpace::RelinkFreeListCategories(Page* page) {
DCHECK_EQ(this, page->owner());
size_t added = 0;
- page->ForAllFreeListCategories([&added](FreeListCategory* category) {
+ page->ForAllFreeListCategories([this, &added](FreeListCategory* category) {
+ category->set_free_list(&free_list_);
added += category->available();
category->Relink();
});
@@ -230,23 +227,23 @@ MemoryChunk* MemoryChunkIterator::next() {
case kOldSpaceState: {
if (old_iterator_ != heap_->old_space()->end()) return *(old_iterator_++);
state_ = kMapState;
- // Fall through.
+ V8_FALLTHROUGH;
}
case kMapState: {
if (map_iterator_ != heap_->map_space()->end()) return *(map_iterator_++);
state_ = kCodeState;
- // Fall through.
+ V8_FALLTHROUGH;
}
case kCodeState: {
if (code_iterator_ != heap_->code_space()->end())
return *(code_iterator_++);
state_ = kLargeObjectState;
- // Fall through.
+ V8_FALLTHROUGH;
}
case kLargeObjectState: {
if (lo_iterator_ != heap_->lo_space()->end()) return *(lo_iterator_++);
state_ = kFinishedState;
- // Fall through;
+ V8_FALLTHROUGH;
}
case kFinishedState:
return nullptr;
@@ -256,23 +253,14 @@ MemoryChunk* MemoryChunkIterator::next() {
UNREACHABLE();
}
-Page* FreeListCategory::page() const {
- return Page::FromAddress(
- reinterpret_cast<Address>(const_cast<FreeListCategory*>(this)));
-}
-
Page* FreeList::GetPageForCategoryType(FreeListCategoryType type) {
return top(type) ? top(type)->page() : nullptr;
}
-FreeList* FreeListCategory::owner() {
- return reinterpret_cast<PagedSpace*>(
- Page::FromAddress(reinterpret_cast<Address>(this))->owner())
- ->free_list();
-}
+FreeList* FreeListCategory::owner() { return free_list_; }
bool FreeListCategory::is_linked() {
- return prev_ != nullptr || next_ != nullptr || owner()->top(type_) == this;
+ return prev_ != nullptr || next_ != nullptr;
}
AllocationResult LocalAllocationBuffer::AllocateRawAligned(
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 2dd5e9b24d..d90cac90f2 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -71,6 +71,8 @@ bool HeapObjectIterator::AdvanceToNextPage() {
PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
: heap_(heap) {
+ DCHECK_EQ(heap->gc_state(), Heap::NOT_IN_GC);
+
for (SpaceIterator it(heap_); it.has_next();) {
it.next()->PauseAllocationObservers();
}
@@ -322,7 +324,12 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
TRACE_BACKGROUND_GC(tracer_,
GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
+ unmapper_->active_unmapping_tasks_.Decrement(1);
unmapper_->pending_unmapping_tasks_semaphore_.Signal();
+ if (FLAG_trace_unmapper) {
+ PrintIsolate(unmapper_->heap_->isolate(),
+ "UnmapFreeMemoryTask Done: id=%" PRIu64 "\n", id());
+ }
}
Unmapper* const unmapper_;
@@ -332,13 +339,26 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
void MemoryAllocator::Unmapper::FreeQueuedChunks() {
if (heap_->use_tasks() && FLAG_concurrent_sweeping) {
- if (concurrent_unmapping_tasks_active_ >= kMaxUnmapperTasks) {
+ if (!MakeRoomForNewTasks()) {
// kMaxUnmapperTasks are already running. Avoid creating any more.
+ if (FLAG_trace_unmapper) {
+ PrintIsolate(heap_->isolate(),
+ "Unmapper::FreeQueuedChunks: reached task limit (%d)\n",
+ kMaxUnmapperTasks);
+ }
return;
}
UnmapFreeMemoryTask* task = new UnmapFreeMemoryTask(heap_->isolate(), this);
- DCHECK_LT(concurrent_unmapping_tasks_active_, kMaxUnmapperTasks);
- task_ids_[concurrent_unmapping_tasks_active_++] = task->id();
+ if (FLAG_trace_unmapper) {
+ PrintIsolate(heap_->isolate(),
+ "Unmapper::FreeQueuedChunks: new task id=%" PRIu64 "\n",
+ task->id());
+ }
+ DCHECK_LT(pending_unmapping_tasks_, kMaxUnmapperTasks);
+ DCHECK_LE(active_unmapping_tasks_.Value(), pending_unmapping_tasks_);
+ DCHECK_GE(active_unmapping_tasks_.Value(), 0);
+ active_unmapping_tasks_.Increment(1);
+ task_ids_[pending_unmapping_tasks_++] = task->id();
V8::GetCurrentPlatform()->CallOnBackgroundThread(
task, v8::Platform::kShortRunningTask);
} else {
@@ -347,18 +367,41 @@ void MemoryAllocator::Unmapper::FreeQueuedChunks() {
}
void MemoryAllocator::Unmapper::WaitUntilCompleted() {
- for (int i = 0; i < concurrent_unmapping_tasks_active_; i++) {
+ for (int i = 0; i < pending_unmapping_tasks_; i++) {
if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
CancelableTaskManager::kTaskAborted) {
pending_unmapping_tasks_semaphore_.Wait();
}
}
- concurrent_unmapping_tasks_active_ = 0;
+ pending_unmapping_tasks_ = 0;
+ active_unmapping_tasks_.SetValue(0);
+
+ if (FLAG_trace_unmapper) {
+ PrintIsolate(heap_->isolate(),
+ "Unmapper::WaitUntilCompleted: no tasks remaining\n");
+ }
+}
+
+bool MemoryAllocator::Unmapper::MakeRoomForNewTasks() {
+ DCHECK_LE(pending_unmapping_tasks_, kMaxUnmapperTasks);
+
+ if (active_unmapping_tasks_.Value() == 0 && pending_unmapping_tasks_ > 0) {
+ // All previous unmapping tasks have been run to completion.
+ // Finalize those tasks to make room for new ones.
+ WaitUntilCompleted();
+ }
+ return pending_unmapping_tasks_ != kMaxUnmapperTasks;
}
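MakeRoomForNewTasks keeps at most kMaxUnmapperTasks outstanding: if every previously started task has finished (the active count is zero) but its slot was never reclaimed, it finalizes those tasks first, and only reports room while the pending count is below the cap. A small sketch of that slot bookkeeping with hypothetical counters:

#include <cstdio>

struct Unmapper {
  static constexpr int kMaxTasks = 4;
  int pending = 0;  // task slots handed out since the last reclaim
  int active = 0;   // tasks still running

  void WaitUntilCompleted() { pending = 0; active = 0; }

  bool MakeRoomForNewTasks() {
    // Reclaim slots of tasks that have already finished running.
    if (active == 0 && pending > 0) WaitUntilCompleted();
    return pending != kMaxTasks;
  }

  bool TryStartTask() {
    if (!MakeRoomForNewTasks()) return false;
    ++pending;
    ++active;
    return true;
  }

  void TaskDone() { --active; }
};

int main() {
  Unmapper u;
  int started = 0;
  for (int i = 0; i < 6; i++) started += u.TryStartTask() ? 1 : 0;
  printf("started %d tasks, cap is %d\n", started, Unmapper::kMaxTasks);
  for (int i = 0; i < started; i++) u.TaskDone();
  printf("room again: %s\n", u.TryStartTask() ? "yes" : "no");
  return 0;
}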
template <MemoryAllocator::Unmapper::FreeMode mode>
void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
MemoryChunk* chunk = nullptr;
+ if (FLAG_trace_unmapper) {
+ PrintIsolate(
+ heap_->isolate(),
+ "Unmapper::PerformFreeMemoryOnQueuedChunks: %d queued chunks\n",
+ NumberOfChunks());
+ }
// Regular chunks.
while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
@@ -380,7 +423,7 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
}
void MemoryAllocator::Unmapper::TearDown() {
- CHECK_EQ(0, concurrent_unmapping_tasks_active_);
+ CHECK_EQ(0, pending_unmapping_tasks_);
PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
for (int i = 0; i < kNumberOfChunkQueues; i++) {
DCHECK(chunks_[i].empty());
@@ -583,7 +626,10 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->set_next_chunk(nullptr);
chunk->set_prev_chunk(nullptr);
chunk->local_tracker_ = nullptr;
- chunk->InitializeFreeListCategories();
+
+ for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ chunk->categories_[i] = nullptr;
+ }
heap->incremental_marking()->non_atomic_marking_state()->ClearLiveness(chunk);
@@ -606,6 +652,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
if (reservation != nullptr) {
chunk->reservation_.TakeControl(reservation);
}
+
return chunk;
}
@@ -615,6 +662,8 @@ Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
// Make sure that categories are initialized before freeing the area.
page->ResetAllocatedBytes();
heap()->incremental_marking()->SetOldSpacePageFlags(page);
+ page->AllocateFreeListCategories();
+ page->InitializeFreeListCategories();
page->InitializationMemoryFence();
return page;
}
@@ -662,6 +711,28 @@ LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
return page;
}
+void Page::AllocateFreeListCategories() {
+ for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ categories_[i] = new FreeListCategory(
+ reinterpret_cast<PagedSpace*>(owner())->free_list(), this);
+ }
+}
+
+void Page::InitializeFreeListCategories() {
+ for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ categories_[i]->Initialize(static_cast<FreeListCategoryType>(i));
+ }
+}
+
+void Page::ReleaseFreeListCategories() {
+ for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ if (categories_[i] != nullptr) {
+ delete categories_[i];
+ categories_[i] = nullptr;
+ }
+ }
+}
+
Page* Page::ConvertNewToOld(Page* old_page) {
DCHECK(!old_page->is_anchor());
DCHECK(old_page->InNewSpace());
@@ -679,6 +750,10 @@ size_t MemoryChunk::CommittedPhysicalMemory() {
return high_water_mark_.Value();
}
+bool MemoryChunk::IsPagedSpace() const {
+ return owner()->identity() != LO_SPACE;
+}
+
void MemoryChunk::InsertAfter(MemoryChunk* other) {
MemoryChunk* other_next = other->next_chunk();
@@ -710,7 +785,8 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
VirtualMemory reservation;
Address area_start = nullptr;
Address area_end = nullptr;
- void* address_hint = heap->GetRandomMmapAddr();
+ void* address_hint =
+ AlignedAddress(heap->GetRandomMmapAddr(), MemoryChunk::kAlignment);
//
// MemoryChunk layout:
@@ -826,8 +902,12 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
owner);
}
- return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
- executable, owner, &reservation);
+ MemoryChunk* chunk =
+ MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
+ executable, owner, &reservation);
+
+ if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
+ return chunk;
}
void Page::ResetAllocatedBytes() { allocated_bytes_ = area_size(); }
@@ -970,6 +1050,8 @@ void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
}
chunk->SetFlag(MemoryChunk::PRE_FREED);
+
+ if (chunk->executable()) UnregisterExecutableMemoryChunk(chunk);
}
@@ -1005,7 +1087,7 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
chunk->SetFlag(MemoryChunk::POOLED);
- // Fall through to kPreFreeAndQueue.
+ V8_FALLTHROUGH;
case kPreFreeAndQueue:
PreFreeMemory(chunk);
// The chunks added to this queue will be freed by a concurrent thread.
@@ -1198,6 +1280,11 @@ void MemoryChunk::ReleaseAllocatedMemory() {
ReleaseInvalidatedSlots();
if (local_tracker_ != nullptr) ReleaseLocalTracker();
if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
+
+ if (IsPagedSpace()) {
+ Page* page = static_cast<Page*>(this);
+ page->ReleaseFreeListCategories();
+ }
}
static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) {
@@ -1345,12 +1432,17 @@ void Space::ResumeAllocationObservers() {
void Space::AllocationStep(int bytes_since_last, Address soon_object,
int size) {
- if (AllocationObserversActive()) {
- heap()->CreateFillerObjectAt(soon_object, size, ClearRecordedSlots::kNo);
- for (AllocationObserver* observer : allocation_observers_) {
- observer->AllocationStep(bytes_since_last, soon_object, size);
- }
+ if (!AllocationObserversActive()) {
+ return;
+ }
+
+ DCHECK(!heap()->allocation_step_in_progress());
+ heap()->set_allocation_step_in_progress(true);
+ heap()->CreateFillerObjectAt(soon_object, size, ClearRecordedSlots::kNo);
+ for (AllocationObserver* observer : allocation_observers_) {
+ observer->AllocationStep(bytes_since_last, soon_object, size);
}
+ heap()->set_allocation_step_in_progress(false);
}
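The allocation_step_in_progress flag is a reentrancy guard: allocation observers may themselves allocate, and without the flag such an allocation could re-enter the stepping machinery. A minimal sketch of the guard pattern, assuming single-threaded use like the original flag:

#include <cassert>
#include <functional>
#include <vector>

struct Space {
  bool step_in_progress = false;
  std::vector<std::function<void()>> observers;

  void AllocationStep() {
    assert(!step_in_progress);  // mirrors the DCHECK above
    step_in_progress = true;
    for (auto& observer : observers) observer();  // observers may allocate
    step_in_progress = false;
  }

  void StartNextStep() {
    if (step_in_progress) return;  // mid-way through a step: do not start another
    // ... set up the next inline allocation step ...
  }
};

int main() {
  Space space;
  space.observers.push_back([&space] { space.StartNextStep(); });
  space.AllocationStep();  // the observer's nested call is ignored by the guard
  return 0;
}

The same check guards both StartNextInlineAllocationStep and InlineAllocationStep in the hunks above, so a step triggered from inside an observer callback is dropped rather than nested.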
intptr_t Space::GetNextInlineAllocationStepSize() {
@@ -1359,15 +1451,13 @@ intptr_t Space::GetNextInlineAllocationStepSize() {
next_step = next_step ? Min(next_step, observer->bytes_to_next_step())
: observer->bytes_to_next_step();
}
- DCHECK(allocation_observers_.size() == 0 || next_step != 0);
+ DCHECK(allocation_observers_.size() == 0 || next_step > 0);
return next_step;
}
PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
Executability executable)
- : SpaceWithLinearArea(heap, space, executable),
- anchor_(this),
- free_list_(this) {
+ : SpaceWithLinearArea(heap, space, executable), anchor_(this) {
area_size_ = MemoryAllocator::PageAreaSize(space);
accounting_stats_.Clear();
}
@@ -1570,7 +1660,8 @@ bool PagedSpace::Expand() {
// Pages created during bootstrapping may contain immortal immovable objects.
if (!heap()->deserialization_complete()) page->MarkNeverEvacuate();
AddPage(page);
- Free(page->area_start(), page->area_size());
+ Free(page->area_start(), page->area_size(),
+ SpaceAccountingMode::kSpaceAccounted);
DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
return true;
}
@@ -1606,7 +1697,8 @@ void PagedSpace::DecreaseLimit(Address new_limit) {
DCHECK_GE(old_limit, new_limit);
if (new_limit != old_limit) {
SetTopAndLimit(top(), new_limit);
- Free(new_limit, old_limit - new_limit);
+ Free(new_limit, old_limit - new_limit,
+ SpaceAccountingMode::kSpaceAccounted);
if (heap()->incremental_marking()->black_allocation()) {
Page::FromAllocationAreaAddress(new_limit)->DestroyBlackArea(new_limit,
old_limit);
@@ -1692,7 +1784,8 @@ void PagedSpace::FreeLinearAllocationArea() {
InlineAllocationStep(current_top, nullptr, nullptr, 0);
SetTopAndLimit(nullptr, nullptr);
DCHECK_GE(current_limit, current_top);
- Free(current_top, current_limit - current_top);
+ Free(current_top, current_limit - current_top,
+ SpaceAccountingMode::kSpaceAccounted);
}
void PagedSpace::ReleasePage(Page* page) {
@@ -1722,6 +1815,7 @@ void PagedSpace::ReleasePage(Page* page) {
void PagedSpace::SetReadAndExecutable() {
DCHECK(identity() == CODE_SPACE);
for (Page* page : *this) {
+ CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
page->SetReadAndExecutable();
}
}
@@ -1729,6 +1823,7 @@ void PagedSpace::SetReadAndExecutable() {
void PagedSpace::SetReadAndWritable() {
DCHECK(identity() == CODE_SPACE);
for (Page* page : *this) {
+ CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
page->SetReadAndWritable();
}
}
@@ -1786,7 +1881,7 @@ bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
DCHECK_LE(limit, end);
DCHECK_LE(size_in_bytes, limit - start);
if (limit != end) {
- Free(limit, end - limit);
+ Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
}
SetLinearAllocationArea(start, limit);
@@ -2078,22 +2173,21 @@ LocalAllocationBuffer& LocalAllocationBuffer::operator=(
}
void NewSpace::UpdateLinearAllocationArea() {
- Address old_top = top();
- Address new_top = to_space_.page_low();
+ // Make sure there are no unaccounted allocations.
+ DCHECK(!AllocationObserversActive() || top_on_previous_step_ == top());
+ Address new_top = to_space_.page_low();
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
allocation_info_.Reset(new_top, to_space_.page_high());
original_top_.SetValue(top());
original_limit_.SetValue(limit());
- UpdateInlineAllocationLimit(0);
- // TODO(ofrobots): It would be more correct to do a step before setting the
- // limit on the new allocation area. However, fixing this causes a regression
- // due to the idle scavenger getting pinged too frequently. crbug.com/795323.
- InlineAllocationStep(old_top, new_top, nullptr, 0);
+ StartNextInlineAllocationStep();
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
void NewSpace::ResetLinearAllocationArea() {
+ // Do a step to account for memory allocated so far before resetting.
+ InlineAllocationStep(top(), top(), nullptr, 0);
to_space_.Reset();
UpdateLinearAllocationArea();
// Clear all mark-bits in the to-space.
@@ -2121,6 +2215,10 @@ void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
bool NewSpace::AddFreshPage() {
Address top = allocation_info_.top();
DCHECK(!Page::IsAtObjectStart(top));
+
+ // Do a step to account for memory allocated on the previous page.
+ InlineAllocationStep(top, top, nullptr, 0);
+
if (!to_space_.AdvancePage()) {
// No more pages left to advance.
return false;
@@ -2176,6 +2274,11 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
}
void SpaceWithLinearArea::StartNextInlineAllocationStep() {
+ if (heap()->allocation_step_in_progress()) {
+ // If we are mid-way through an existing step, don't start a new one.
+ return;
+ }
+
if (AllocationObserversActive()) {
top_on_previous_step_ = top();
UpdateInlineAllocationLimit(0);
@@ -2217,6 +2320,11 @@ void SpaceWithLinearArea::InlineAllocationStep(Address top,
Address top_for_next_step,
Address soon_object,
size_t size) {
+ if (heap()->allocation_step_in_progress()) {
+ // Avoid starting a new step if we are mid-way through an existing one.
+ return;
+ }
+
if (top_on_previous_step_) {
if (top < top_on_previous_step_) {
// Generated code decreased the top pointer to do folded allocations.
@@ -2608,7 +2716,6 @@ void FreeListCategory::Reset() {
FreeSpace* FreeListCategory::PickNodeFromList(size_t* node_size) {
DCHECK(page()->CanAllocate());
-
FreeSpace* node = top();
if (node == nullptr) return nullptr;
set_top(node->next());
@@ -2620,10 +2727,9 @@ FreeSpace* FreeListCategory::PickNodeFromList(size_t* node_size) {
FreeSpace* FreeListCategory::TryPickNodeFromList(size_t minimum_size,
size_t* node_size) {
DCHECK(page()->CanAllocate());
-
FreeSpace* node = PickNodeFromList(node_size);
if ((node != nullptr) && (*node_size < minimum_size)) {
- Free(node, *node_size, kLinkCategory);
+ Free(node->address(), *node_size, kLinkCategory);
*node_size = 0;
return nullptr;
}
@@ -2633,7 +2739,6 @@ FreeSpace* FreeListCategory::TryPickNodeFromList(size_t minimum_size,
FreeSpace* FreeListCategory::SearchForNodeInList(size_t minimum_size,
size_t* node_size) {
DCHECK(page()->CanAllocate());
-
FreeSpace* prev_non_evac_node = nullptr;
for (FreeSpace* cur_node = top(); cur_node != nullptr;
cur_node = cur_node->next()) {
@@ -2656,9 +2761,10 @@ FreeSpace* FreeListCategory::SearchForNodeInList(size_t minimum_size,
return nullptr;
}
-void FreeListCategory::Free(FreeSpace* free_space, size_t size_in_bytes,
+void FreeListCategory::Free(Address start, size_t size_in_bytes,
FreeMode mode) {
- CHECK(page()->CanAllocate());
+ DCHECK(page()->CanAllocate());
+ FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
free_space->set_next(top());
set_top(free_space);
available_ += size_in_bytes;
@@ -2686,7 +2792,7 @@ void FreeListCategory::Relink() {
owner()->AddCategory(this);
}
-FreeList::FreeList(PagedSpace* owner) : owner_(owner), wasted_bytes_(0) {
+FreeList::FreeList() : wasted_bytes_(0) {
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
categories_[i] = nullptr;
}
@@ -2704,11 +2810,6 @@ void FreeList::Reset() {
}
size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
- if (size_in_bytes == 0) return 0;
-
- owner()->heap()->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
- ClearRecordedSlots::kNo);
-
Page* page = Page::FromAddress(start);
page->DecreaseAllocatedBytes(size_in_bytes);
@@ -2719,11 +2820,10 @@ size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
return size_in_bytes;
}
- FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
// Insert other blocks at the head of a free list of the appropriate
// magnitude.
FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
- page->free_list_category(type)->Free(free_space, size_in_bytes, mode);
+ page->free_list_category(type)->Free(start, size_in_bytes, mode);
DCHECK_EQ(page->AvailableInFreeList(),
page->AvailableInFreeListFromAllocatedBytes());
return 0;
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 08fef7d6e3..1c8bad8dc5 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -139,6 +139,8 @@ enum FreeListCategoryType {
enum FreeMode { kLinkCategory, kDoNotLinkCategory };
+enum class SpaceAccountingMode { kSpaceAccounted, kSpaceUnaccounted };
+
enum RememberedSetType {
OLD_TO_NEW,
OLD_TO_OLD,
@@ -148,15 +150,10 @@ enum RememberedSetType {
// A free list category maintains a linked list of free memory blocks.
class FreeListCategory {
public:
- static const int kSize = kIntSize + // FreeListCategoryType type_
- kIntSize + // padding for type_
- kSizetSize + // size_t available_
- kPointerSize + // FreeSpace* top_
- kPointerSize + // FreeListCategory* prev_
- kPointerSize; // FreeListCategory* next_
-
- FreeListCategory()
- : type_(kInvalidCategory),
+ FreeListCategory(FreeList* free_list, Page* page)
+ : free_list_(free_list),
+ page_(page),
+ type_(kInvalidCategory),
available_(0),
top_(nullptr),
prev_(nullptr),
@@ -180,7 +177,7 @@ class FreeListCategory {
// category is currently unlinked.
void Relink();
- void Free(FreeSpace* node, size_t size_in_bytes, FreeMode mode);
+ void Free(Address address, size_t size_in_bytes, FreeMode mode);
// Picks a node from the list and stores its size in |node_size|. Returns
// nullptr if the category is empty.
@@ -196,11 +193,13 @@ class FreeListCategory {
FreeSpace* SearchForNodeInList(size_t minimum_size, size_t* node_size);
inline FreeList* owner();
- inline Page* page() const;
+ inline Page* page() const { return page_; }
inline bool is_linked();
bool is_empty() { return top() == nullptr; }
size_t available() const { return available_; }
+ void set_free_list(FreeList* free_list) { free_list_ = free_list; }
+
#ifdef DEBUG
size_t SumFreeList();
int FreeListLength();
@@ -218,6 +217,12 @@ class FreeListCategory {
FreeListCategory* next() { return next_; }
void set_next(FreeListCategory* next) { next_ = next; }
+ // This FreeListCategory is owned by the given free_list_.
+ FreeList* free_list_;
+
+ // This FreeListCategory holds free list entries of the given page_.
+ Page* const page_;
+
// |type_|: The type of this free list category.
FreeListCategoryType type_;
@@ -233,6 +238,8 @@ class FreeListCategory {
friend class FreeList;
friend class PagedSpace;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListCategory);
};
// MemoryChunk represents a memory region owned by a specific space.
@@ -370,7 +377,7 @@ class MemoryChunk {
+ kSizetSize // size_t wasted_memory_
+ kPointerSize // AtomicValue next_chunk_
+ kPointerSize // AtomicValue prev_chunk_
- + FreeListCategory::kSize * kNumberOfCategories
+ + kPointerSize * kNumberOfCategories
// FreeListCategory categories_[kNumberOfCategories]
+ kPointerSize // LocalArrayBufferTracker* local_tracker_
+ kIntptrSize // intptr_t young_generation_live_byte_count_
@@ -610,6 +617,8 @@ class MemoryChunk {
void set_owner(Space* space) { owner_.SetValue(space); }
+ bool IsPagedSpace() const;
+
void InsertAfter(MemoryChunk* other);
void Unlink();
@@ -620,8 +629,6 @@ class MemoryChunk {
void SetReadAndExecutable();
void SetReadAndWritable();
- inline void InitializeFreeListCategories();
-
protected:
static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
@@ -699,7 +706,7 @@ class MemoryChunk {
// prev_chunk_ holds a pointer of type MemoryChunk
base::AtomicValue<MemoryChunk*> prev_chunk_;
- FreeListCategory categories_[kNumberOfCategories];
+ FreeListCategory* categories_[kNumberOfCategories];
LocalArrayBufferTracker* local_tracker_;
@@ -788,7 +795,7 @@ class Page : public MemoryChunk {
template <typename Callback>
inline void ForAllFreeListCategories(Callback callback) {
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- callback(&categories_[i]);
+ callback(categories_[i]);
}
}
@@ -820,7 +827,7 @@ class Page : public MemoryChunk {
}
FreeListCategory* free_list_category(FreeListCategoryType type) {
- return &categories_[type];
+ return categories_[type];
}
bool is_anchor() { return IsFlagSet(Page::ANCHOR); }
@@ -845,6 +852,10 @@ class Page : public MemoryChunk {
V8_EXPORT_PRIVATE void CreateBlackArea(Address start, Address end);
void DestroyBlackArea(Address start, Address end);
+ void InitializeFreeListCategories();
+ void AllocateFreeListCategories();
+ void ReleaseFreeListCategories();
+
#ifdef DEBUG
void Print();
#endif // DEBUG
@@ -1170,14 +1181,14 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
: heap_(heap),
allocator_(allocator),
pending_unmapping_tasks_semaphore_(0),
- concurrent_unmapping_tasks_active_(0) {
+ pending_unmapping_tasks_(0),
+ active_unmapping_tasks_(0) {
chunks_[kRegular].reserve(kReservedQueueingSlots);
chunks_[kPooled].reserve(kReservedQueueingSlots);
}
void AddMemoryChunkSafe(MemoryChunk* chunk) {
- if ((chunk->size() == Page::kPageSize) &&
- (chunk->executable() != EXECUTABLE)) {
+ if (chunk->IsPagedSpace() && chunk->executable() != EXECUTABLE) {
AddMemoryChunkSafe<kRegular>(chunk);
} else {
AddMemoryChunkSafe<kNonRegular>(chunk);
@@ -1238,6 +1249,8 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
return chunk;
}
+ bool MakeRoomForNewTasks();
+
template <FreeMode mode>
void PerformFreeMemoryOnQueuedChunks();
@@ -1247,7 +1260,8 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
base::Semaphore pending_unmapping_tasks_semaphore_;
- intptr_t concurrent_unmapping_tasks_active_;
+ intptr_t pending_unmapping_tasks_;
+ base::AtomicNumber<intptr_t> active_unmapping_tasks_;
friend class MemoryAllocator;
};
@@ -1359,6 +1373,12 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
// and false otherwise.
bool CommitBlock(Address start, size_t size, Executability executable);
+ // Checks if an allocated MemoryChunk was intended to be used for executable
+ // memory.
+ bool IsMemoryChunkExecutable(MemoryChunk* chunk) {
+ return executable_memory_.find(chunk) != executable_memory_.end();
+ }
+
// Uncommit a contiguous block of memory [start..(start+size)[.
// start is not nullptr, the size is greater than zero, and the
// block is contained in the initial chunk. Returns true if it succeeded
@@ -1409,6 +1429,17 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
} while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
}
+ void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
+ DCHECK(chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
+ DCHECK_EQ(executable_memory_.find(chunk), executable_memory_.end());
+ executable_memory_.insert(chunk);
+ }
+
+ void UnregisterExecutableMemoryChunk(MemoryChunk* chunk) {
+ DCHECK_NE(executable_memory_.find(chunk), executable_memory_.end());
+ executable_memory_.erase(chunk);
+ }
+
Isolate* isolate_;
CodeRange* code_range_;
@@ -1431,6 +1462,9 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
VirtualMemory last_chunk_;
Unmapper unmapper_;
+ // Data structure to remember allocated executable memory chunks.
+ std::unordered_set<MemoryChunk*> executable_memory_;
+
friend class heap::TestCodeRangeScope;
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
@@ -1731,7 +1765,7 @@ class V8_EXPORT_PRIVATE FreeList {
return kHuge;
}
- explicit FreeList(PagedSpace* owner);
+ FreeList();
// Adds a node on the free list. The block of size {size_in_bytes} starting
// at {start} is placed on the free list. The return value is the number of
@@ -1779,7 +1813,6 @@ class V8_EXPORT_PRIVATE FreeList {
size_t EvictFreeListItems(Page* page);
bool ContainsPageFreeListItems(Page* page);
- PagedSpace* owner() { return owner_; }
size_t wasted_bytes() { return wasted_bytes_.Value(); }
template <typename Callback>
@@ -1874,13 +1907,10 @@ class V8_EXPORT_PRIVATE FreeList {
return categories_[type];
}
- PagedSpace* owner_;
base::AtomicNumber<size_t> wasted_bytes_;
FreeListCategory* categories_[kNumberOfCategories];
friend class FreeListCategory;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
};
// LocalAllocationBuffer represents a linear allocation area that is created
@@ -2086,11 +2116,22 @@ class V8_EXPORT_PRIVATE PagedSpace
MUST_USE_RESULT inline AllocationResult AllocateRaw(
int size_in_bytes, AllocationAlignment alignment);
+ size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
+ if (size_in_bytes == 0) return 0;
+ heap_->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
+ ClearRecordedSlots::kNo);
+ if (mode == SpaceAccountingMode::kSpaceAccounted) {
+ return AccountedFree(start, size_in_bytes);
+ } else {
+ return UnaccountedFree(start, size_in_bytes);
+ }
+ }
+
// Give a block of memory to the space's free list. It might be added to
// the free list or accounted as waste.
// If add_to_freelist is false then just accounting stats are updated and
// no attempt to add area to free list is made.
- size_t Free(Address start, size_t size_in_bytes) {
+ size_t AccountedFree(Address start, size_t size_in_bytes) {
size_t wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
Page* page = Page::FromAddress(start);
accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
diff --git a/deps/v8/src/heap/store-buffer.h b/deps/v8/src/heap/store-buffer.h
index a69abcc886..58f47f4834 100644
--- a/deps/v8/src/heap/store-buffer.h
+++ b/deps/v8/src/heap/store-buffer.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_STORE_BUFFER_H_
-#define V8_STORE_BUFFER_H_
+#ifndef V8_HEAP_STORE_BUFFER_H_
+#define V8_HEAP_STORE_BUFFER_H_
#include "src/allocation.h"
#include "src/base/logging.h"
@@ -225,4 +225,4 @@ class StoreBuffer {
} // namespace internal
} // namespace v8
-#endif // V8_STORE_BUFFER_H_
+#endif // V8_HEAP_STORE_BUFFER_H_
diff --git a/deps/v8/src/heap/stress-marking-observer.h b/deps/v8/src/heap/stress-marking-observer.h
index b97c2b179c..37ebb82197 100644
--- a/deps/v8/src/heap/stress-marking-observer.h
+++ b/deps/v8/src/heap/stress-marking-observer.h
@@ -23,4 +23,4 @@ class StressMarkingObserver : public AllocationObserver {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_HEAP_STRESS_MARKING_OBSERVER_H_
diff --git a/deps/v8/src/heap/stress-scavenge-observer.h b/deps/v8/src/heap/stress-scavenge-observer.h
index 6f69afe4c5..b39b2eac59 100644
--- a/deps/v8/src/heap/stress-scavenge-observer.h
+++ b/deps/v8/src/heap/stress-scavenge-observer.h
@@ -36,4 +36,4 @@ class StressScavengeObserver : public AllocationObserver {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_HEAP_STRESS_SCAVENGE_OBSERVER_H_
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
index 25ba0df8fd..2072e407e9 100644
--- a/deps/v8/src/heap/sweeper.cc
+++ b/deps/v8/src/heap/sweeper.cc
@@ -279,8 +279,8 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
memset(free_start, 0xCC, size);
}
if (free_list_mode == REBUILD_FREE_LIST) {
- freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
- free_start, size);
+ freed_bytes = reinterpret_cast<PagedSpace*>(space)->Free(
+ free_start, size, SpaceAccountingMode::kSpaceUnaccounted);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
} else {
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
@@ -318,8 +318,8 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
memset(free_start, 0xCC, size);
}
if (free_list_mode == REBUILD_FREE_LIST) {
- freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
- free_start, size);
+ freed_bytes = reinterpret_cast<PagedSpace*>(space)->Free(
+ free_start, size, SpaceAccountingMode::kSpaceUnaccounted);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
} else {
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
diff --git a/deps/v8/src/heap/worklist.h b/deps/v8/src/heap/worklist.h
index 3421e16611..bb3eae2228 100644
--- a/deps/v8/src/heap/worklist.h
+++ b/deps/v8/src/heap/worklist.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HEAP_WORKLIST_
-#define V8_HEAP_WORKLIST_
+#ifndef V8_HEAP_WORKLIST_H_
+#define V8_HEAP_WORKLIST_H_
#include <cstddef>
#include <utility>
@@ -388,4 +388,4 @@ class Worklist {
} // namespace internal
} // namespace v8
-#endif // V8_HEAP_WORKLIST_
+#endif // V8_HEAP_WORKLIST_H_
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index 368addd718..b89dceb786 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -46,13 +46,10 @@
namespace v8 {
namespace internal {
-bool CpuFeatures::SupportsCrankshaft() { return true; }
+bool CpuFeatures::SupportsOptimizer() { return true; }
bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(SSE4_1); }
-static const byte kCallOpcode = 0xE8;
-static const int kNoCodeAgeSequenceLength = 5;
-
// The modes possibly affected by apply must be in kApplyMask.
void RelocInfo::apply(intptr_t delta) {
@@ -105,7 +102,7 @@ void RelocInfo::set_target_object(HeapObject* target,
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Memory::Object_at(pc_) = target;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(target->GetIsolate(), pc_, sizeof(Address));
+ Assembler::FlushICache(pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
@@ -138,22 +135,22 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
return reinterpret_cast<Address>(*reinterpret_cast<int32_t*>(pc_));
}
-void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
+void RelocInfo::set_target_runtime_entry(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target) {
- set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
}
}
-void RelocInfo::WipeOut(Isolate* isolate) {
+void RelocInfo::WipeOut() {
if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = nullptr;
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
- Assembler::set_target_address_at(isolate, pc_, constant_pool_,
+ Assembler::set_target_address_at(pc_, constant_pool_,
pc_ + sizeof(int32_t));
} else {
UNREACHABLE();
@@ -161,11 +158,11 @@ void RelocInfo::WipeOut(Isolate* isolate) {
}
template <typename ObjectVisitor>
-void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);
- Assembler::FlushICache(isolate, pc_, sizeof(Address));
+ Assembler::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
@@ -249,15 +246,13 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
return pc + sizeof(int32_t) + *reinterpret_cast<int32_t*>(pc);
}
-
-void Assembler::set_target_address_at(Isolate* isolate, Address pc,
- Address constant_pool, Address target,
+void Assembler::set_target_address_at(Address pc, Address constant_pool,
+ Address target,
ICacheFlushMode icache_flush_mode) {
- DCHECK_IMPLIES(isolate == nullptr, icache_flush_mode == SKIP_ICACHE_FLUSH);
int32_t* p = reinterpret_cast<int32_t*>(pc);
*p = target - (pc + sizeof(int32_t));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, p, sizeof(int32_t));
+ Assembler::FlushICache(p, sizeof(int32_t));
}
}
@@ -266,8 +261,8 @@ Address Assembler::target_address_from_return_address(Address pc) {
}
void Assembler::deserialization_set_special_target_at(
- Isolate* isolate, Address instruction_payload, Code* code, Address target) {
- set_target_address_at(isolate, instruction_payload,
+ Address instruction_payload, Code* code, Address target) {
+ set_target_address_at(instruction_payload,
code ? code->constant_pool() : nullptr, target);
}
@@ -299,9 +294,8 @@ void Assembler::emit_near_disp(Label* L) {
*pc_++ = disp;
}
-
void Assembler::deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
+ Address pc, Address target, RelocInfo::Mode mode) {
Memory::Address_at(pc) = target;
}
@@ -321,12 +315,6 @@ void Operand::set_disp8(int8_t disp) {
*reinterpret_cast<int8_t*>(&buf_[len_++]) = disp;
}
-
-Operand::Operand(Immediate imm) {
- // [disp/r]
- set_modrm(0, ebp);
- set_dispr(imm.immediate(), imm.rmode_);
-}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 38508c7632..a1b8dada6e 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -105,7 +105,7 @@ bool OSHasAVXSupport() {
size_t buffer_size = arraysize(buffer);
int ctl_name[] = {CTL_KERN, KERN_OSRELEASE};
if (sysctl(ctl_name, 2, buffer, &buffer_size, nullptr, 0) != 0) {
- V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
+ FATAL("V8 failed to get kernel version");
}
// The buffer now contains a string of the form XX.YY.ZZ, where
// XX is the major kernel version component.
@@ -207,26 +207,26 @@ Address RelocInfo::embedded_address() const { return Memory::Address_at(pc_); }
uint32_t RelocInfo::embedded_size() const { return Memory::uint32_at(pc_); }
-void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
+void RelocInfo::set_embedded_address(Address address,
ICacheFlushMode icache_flush_mode) {
Memory::Address_at(pc_) = address;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, pc_, sizeof(Address));
+ Assembler::FlushICache(pc_, sizeof(Address));
}
}
-void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
+void RelocInfo::set_embedded_size(uint32_t size,
ICacheFlushMode icache_flush_mode) {
Memory::uint32_at(pc_) = size;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, pc_, sizeof(uint32_t));
+ Assembler::FlushICache(pc_, sizeof(uint32_t));
}
}
-void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
+void RelocInfo::set_js_to_wasm_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
+ Assembler::set_target_address_at(pc_, constant_pool_, address,
icache_flush_mode);
}
@@ -411,6 +411,7 @@ void Assembler::Nop(int bytes) {
switch (bytes) {
case 2:
EMIT(0x66);
+ V8_FALLTHROUGH;
case 1:
EMIT(0x90);
return;
@@ -427,6 +428,7 @@ void Assembler::Nop(int bytes) {
return;
case 6:
EMIT(0x66);
+ V8_FALLTHROUGH;
case 5:
EMIT(0xF);
EMIT(0x1F);
@@ -447,12 +449,15 @@ void Assembler::Nop(int bytes) {
case 11:
EMIT(0x66);
bytes--;
+ V8_FALLTHROUGH;
case 10:
EMIT(0x66);
bytes--;
+ V8_FALLTHROUGH;
case 9:
EMIT(0x66);
bytes--;
+ V8_FALLTHROUGH;
case 8:
EMIT(0xF);
EMIT(0x1F);
@@ -528,8 +533,7 @@ void Assembler::push(Register src) {
EMIT(0x50 | src.code());
}
-
-void Assembler::push(const Operand& src) {
+void Assembler::push(Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xFF);
emit_operand(esi, src);
@@ -542,8 +546,7 @@ void Assembler::pop(Register dst) {
EMIT(0x58 | dst.code());
}
-
-void Assembler::pop(const Operand& dst) {
+void Assembler::pop(Operand dst) {
EnsureSpace ensure_space(this);
EMIT(0x8F);
emit_operand(eax, dst);
@@ -563,48 +566,42 @@ void Assembler::leave() {
EMIT(0xC9);
}
-
-void Assembler::mov_b(Register dst, const Operand& src) {
+void Assembler::mov_b(Register dst, Operand src) {
CHECK(dst.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0x8A);
emit_operand(dst, src);
}
-
-void Assembler::mov_b(const Operand& dst, const Immediate& src) {
+void Assembler::mov_b(Operand dst, const Immediate& src) {
EnsureSpace ensure_space(this);
EMIT(0xC6);
emit_operand(eax, dst);
EMIT(static_cast<int8_t>(src.immediate()));
}
-
-void Assembler::mov_b(const Operand& dst, Register src) {
+void Assembler::mov_b(Operand dst, Register src) {
CHECK(src.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0x88);
emit_operand(src, dst);
}
-
-void Assembler::mov_w(Register dst, const Operand& src) {
+void Assembler::mov_w(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x8B);
emit_operand(dst, src);
}
-
-void Assembler::mov_w(const Operand& dst, Register src) {
+void Assembler::mov_w(Operand dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x89);
emit_operand(src, dst);
}
-
-void Assembler::mov_w(const Operand& dst, const Immediate& src) {
+void Assembler::mov_w(Operand dst, const Immediate& src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0xC7);
@@ -633,8 +630,7 @@ void Assembler::mov(Register dst, Handle<HeapObject> handle) {
emit(handle);
}
-
-void Assembler::mov(Register dst, const Operand& src) {
+void Assembler::mov(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x8B);
emit_operand(dst, src);
@@ -647,62 +643,62 @@ void Assembler::mov(Register dst, Register src) {
EMIT(0xC0 | src.code() << 3 | dst.code());
}
-
-void Assembler::mov(const Operand& dst, const Immediate& x) {
+void Assembler::mov(Operand dst, const Immediate& x) {
EnsureSpace ensure_space(this);
EMIT(0xC7);
emit_operand(eax, dst);
emit(x);
}
-void Assembler::mov(const Operand& dst, Handle<HeapObject> handle) {
+void Assembler::mov(Operand dst, Address src, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
EMIT(0xC7);
emit_operand(eax, dst);
- emit(handle);
+ emit(reinterpret_cast<uint32_t>(src), rmode);
}
+void Assembler::mov(Operand dst, Handle<HeapObject> handle) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xC7);
+ emit_operand(eax, dst);
+ emit(handle);
+}
-void Assembler::mov(const Operand& dst, Register src) {
+void Assembler::mov(Operand dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x89);
emit_operand(src, dst);
}
-
-void Assembler::movsx_b(Register dst, const Operand& src) {
+void Assembler::movsx_b(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xBE);
emit_operand(dst, src);
}
-
-void Assembler::movsx_w(Register dst, const Operand& src) {
+void Assembler::movsx_w(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xBF);
emit_operand(dst, src);
}
-
-void Assembler::movzx_b(Register dst, const Operand& src) {
+void Assembler::movzx_b(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xB6);
emit_operand(dst, src);
}
-
-void Assembler::movzx_w(Register dst, const Operand& src) {
+void Assembler::movzx_w(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xB7);
emit_operand(dst, src);
}
-
-void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
+void Assembler::cmov(Condition cc, Register dst, Operand src) {
EnsureSpace ensure_space(this);
// Opcode: 0f 40 + cc /r.
EMIT(0x0F);
@@ -747,21 +743,20 @@ void Assembler::xchg(Register dst, Register src) {
}
}
-
-void Assembler::xchg(Register dst, const Operand& src) {
+void Assembler::xchg(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x87);
emit_operand(dst, src);
}
-void Assembler::xchg_b(Register reg, const Operand& op) {
+void Assembler::xchg_b(Register reg, Operand op) {
DCHECK(reg.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0x86);
emit_operand(reg, op);
}
-void Assembler::xchg_w(Register reg, const Operand& op) {
+void Assembler::xchg_w(Register reg, Operand op) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x87);
@@ -773,14 +768,14 @@ void Assembler::lock() {
EMIT(0xF0);
}
-void Assembler::cmpxchg(const Operand& dst, Register src) {
+void Assembler::cmpxchg(Operand dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xB1);
emit_operand(src, dst);
}
-void Assembler::cmpxchg_b(const Operand& dst, Register src) {
+void Assembler::cmpxchg_b(Operand dst, Register src) {
DCHECK(src.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0x0F);
@@ -788,7 +783,7 @@ void Assembler::cmpxchg_b(const Operand& dst, Register src) {
emit_operand(src, dst);
}
-void Assembler::cmpxchg_w(const Operand& dst, Register src) {
+void Assembler::cmpxchg_w(Operand dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -803,34 +798,36 @@ void Assembler::lfence() {
EMIT(0xE8);
}
+void Assembler::pause() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF3);
+ EMIT(0x90);
+}
+
void Assembler::adc(Register dst, int32_t imm32) {
EnsureSpace ensure_space(this);
emit_arith(2, Operand(dst), Immediate(imm32));
}
-
-void Assembler::adc(Register dst, const Operand& src) {
+void Assembler::adc(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x13);
emit_operand(dst, src);
}
-
-void Assembler::add(Register dst, const Operand& src) {
+void Assembler::add(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x03);
emit_operand(dst, src);
}
-
-void Assembler::add(const Operand& dst, Register src) {
+void Assembler::add(Operand dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x01);
emit_operand(src, dst);
}
-
-void Assembler::add(const Operand& dst, const Immediate& x) {
+void Assembler::add(Operand dst, const Immediate& x) {
DCHECK_NOT_NULL(reloc_info_writer.last_pc());
EnsureSpace ensure_space(this);
emit_arith(0, dst, x);
@@ -847,27 +844,24 @@ void Assembler::and_(Register dst, const Immediate& x) {
emit_arith(4, Operand(dst), x);
}
-
-void Assembler::and_(Register dst, const Operand& src) {
+void Assembler::and_(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x23);
emit_operand(dst, src);
}
-
-void Assembler::and_(const Operand& dst, const Immediate& x) {
+void Assembler::and_(Operand dst, const Immediate& x) {
EnsureSpace ensure_space(this);
emit_arith(4, dst, x);
}
-
-void Assembler::and_(const Operand& dst, Register src) {
+void Assembler::and_(Operand dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x21);
emit_operand(src, dst);
}
-void Assembler::cmpb(const Operand& op, Immediate imm8) {
+void Assembler::cmpb(Operand op, Immediate imm8) {
DCHECK(imm8.is_int8() || imm8.is_uint8());
EnsureSpace ensure_space(this);
if (op.is_reg(eax)) {
@@ -879,24 +873,21 @@ void Assembler::cmpb(const Operand& op, Immediate imm8) {
emit_b(imm8);
}
-
-void Assembler::cmpb(const Operand& op, Register reg) {
+void Assembler::cmpb(Operand op, Register reg) {
CHECK(reg.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0x38);
emit_operand(reg, op);
}
-
-void Assembler::cmpb(Register reg, const Operand& op) {
+void Assembler::cmpb(Register reg, Operand op) {
CHECK(reg.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0x3A);
emit_operand(reg, op);
}
-
-void Assembler::cmpw(const Operand& op, Immediate imm16) {
+void Assembler::cmpw(Operand op, Immediate imm16) {
DCHECK(imm16.is_int16() || imm16.is_uint16());
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -905,14 +896,14 @@ void Assembler::cmpw(const Operand& op, Immediate imm16) {
emit_w(imm16);
}
-void Assembler::cmpw(Register reg, const Operand& op) {
+void Assembler::cmpw(Register reg, Operand op) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x3B);
emit_operand(reg, op);
}
-void Assembler::cmpw(const Operand& op, Register reg) {
+void Assembler::cmpw(Operand op, Register reg) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x39);
@@ -929,38 +920,35 @@ void Assembler::cmp(Register reg, Handle<HeapObject> handle) {
emit_arith(7, Operand(reg), Immediate(handle));
}
-
-void Assembler::cmp(Register reg, const Operand& op) {
+void Assembler::cmp(Register reg, Operand op) {
EnsureSpace ensure_space(this);
EMIT(0x3B);
emit_operand(reg, op);
}
-void Assembler::cmp(const Operand& op, Register reg) {
+void Assembler::cmp(Operand op, Register reg) {
EnsureSpace ensure_space(this);
EMIT(0x39);
emit_operand(reg, op);
}
-void Assembler::cmp(const Operand& op, const Immediate& imm) {
+void Assembler::cmp(Operand op, const Immediate& imm) {
EnsureSpace ensure_space(this);
emit_arith(7, op, imm);
}
-void Assembler::cmp(const Operand& op, Handle<HeapObject> handle) {
+void Assembler::cmp(Operand op, Handle<HeapObject> handle) {
EnsureSpace ensure_space(this);
emit_arith(7, op, Immediate(handle));
}
-
-void Assembler::cmpb_al(const Operand& op) {
+void Assembler::cmpb_al(Operand op) {
EnsureSpace ensure_space(this);
EMIT(0x38); // CMP r/m8, r8
emit_operand(eax, op); // eax has same code as register al.
}
-
-void Assembler::cmpw_ax(const Operand& op) {
+void Assembler::cmpw_ax(Operand op) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x39); // CMP r/m16, r16
@@ -975,8 +963,7 @@ void Assembler::dec_b(Register dst) {
EMIT(0xC8 | dst.code());
}
-
-void Assembler::dec_b(const Operand& dst) {
+void Assembler::dec_b(Operand dst) {
EnsureSpace ensure_space(this);
EMIT(0xFE);
emit_operand(ecx, dst);
@@ -988,8 +975,7 @@ void Assembler::dec(Register dst) {
EMIT(0x48 | dst.code());
}
-
-void Assembler::dec(const Operand& dst) {
+void Assembler::dec(Operand dst) {
EnsureSpace ensure_space(this);
EMIT(0xFF);
emit_operand(ecx, dst);
@@ -1001,15 +987,13 @@ void Assembler::cdq() {
EMIT(0x99);
}
-
-void Assembler::idiv(const Operand& src) {
+void Assembler::idiv(Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF7);
emit_operand(edi, src);
}
-
-void Assembler::div(const Operand& src) {
+void Assembler::div(Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF7);
emit_operand(esi, src);
@@ -1022,8 +1006,7 @@ void Assembler::imul(Register reg) {
EMIT(0xE8 | reg.code());
}
-
-void Assembler::imul(Register dst, const Operand& src) {
+void Assembler::imul(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xAF);
@@ -1035,8 +1018,7 @@ void Assembler::imul(Register dst, Register src, int32_t imm32) {
imul(dst, Operand(src), imm32);
}
-
-void Assembler::imul(Register dst, const Operand& src, int32_t imm32) {
+void Assembler::imul(Register dst, Operand src, int32_t imm32) {
EnsureSpace ensure_space(this);
if (is_int8(imm32)) {
EMIT(0x6B);
@@ -1055,15 +1037,13 @@ void Assembler::inc(Register dst) {
EMIT(0x40 | dst.code());
}
-
-void Assembler::inc(const Operand& dst) {
+void Assembler::inc(Operand dst) {
EnsureSpace ensure_space(this);
EMIT(0xFF);
emit_operand(eax, dst);
}
-
-void Assembler::lea(Register dst, const Operand& src) {
+void Assembler::lea(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x8D);
emit_operand(dst, src);
@@ -1083,8 +1063,7 @@ void Assembler::neg(Register dst) {
EMIT(0xD8 | dst.code());
}
-
-void Assembler::neg(const Operand& dst) {
+void Assembler::neg(Operand dst) {
EnsureSpace ensure_space(this);
EMIT(0xF7);
emit_operand(ebx, dst);
@@ -1097,8 +1076,7 @@ void Assembler::not_(Register dst) {
EMIT(0xD0 | dst.code());
}
-
-void Assembler::not_(const Operand& dst) {
+void Assembler::not_(Operand dst) {
EnsureSpace ensure_space(this);
EMIT(0xF7);
emit_operand(edx, dst);
@@ -1110,21 +1088,18 @@ void Assembler::or_(Register dst, int32_t imm32) {
emit_arith(1, Operand(dst), Immediate(imm32));
}
-
-void Assembler::or_(Register dst, const Operand& src) {
+void Assembler::or_(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0B);
emit_operand(dst, src);
}
-
-void Assembler::or_(const Operand& dst, const Immediate& x) {
+void Assembler::or_(Operand dst, const Immediate& x) {
EnsureSpace ensure_space(this);
emit_arith(1, dst, x);
}
-
-void Assembler::or_(const Operand& dst, Register src) {
+void Assembler::or_(Operand dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x09);
emit_operand(src, dst);
@@ -1158,8 +1133,7 @@ void Assembler::rcr(Register dst, uint8_t imm8) {
}
}
-
-void Assembler::ror(const Operand& dst, uint8_t imm8) {
+void Assembler::ror(Operand dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
DCHECK(is_uint5(imm8)); // illegal shift count
if (imm8 == 1) {
@@ -1172,15 +1146,13 @@ void Assembler::ror(const Operand& dst, uint8_t imm8) {
}
}
-
-void Assembler::ror_cl(const Operand& dst) {
+void Assembler::ror_cl(Operand dst) {
EnsureSpace ensure_space(this);
EMIT(0xD3);
emit_operand(ecx, dst);
}
-
-void Assembler::sar(const Operand& dst, uint8_t imm8) {
+void Assembler::sar(Operand dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
DCHECK(is_uint5(imm8)); // illegal shift count
if (imm8 == 1) {
@@ -1193,14 +1165,13 @@ void Assembler::sar(const Operand& dst, uint8_t imm8) {
}
}
-
-void Assembler::sar_cl(const Operand& dst) {
+void Assembler::sar_cl(Operand dst) {
EnsureSpace ensure_space(this);
EMIT(0xD3);
emit_operand(edi, dst);
}
-void Assembler::sbb(Register dst, const Operand& src) {
+void Assembler::sbb(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x1B);
emit_operand(dst, src);
@@ -1222,8 +1193,7 @@ void Assembler::shld_cl(Register dst, Register src) {
emit_operand(src, Operand(dst));
}
-
-void Assembler::shl(const Operand& dst, uint8_t imm8) {
+void Assembler::shl(Operand dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
DCHECK(is_uint5(imm8)); // illegal shift count
if (imm8 == 1) {
@@ -1236,14 +1206,13 @@ void Assembler::shl(const Operand& dst, uint8_t imm8) {
}
}
-
-void Assembler::shl_cl(const Operand& dst) {
+void Assembler::shl_cl(Operand dst) {
EnsureSpace ensure_space(this);
EMIT(0xD3);
emit_operand(esp, dst);
}
-void Assembler::shr(const Operand& dst, uint8_t imm8) {
+void Assembler::shr(Operand dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
DCHECK(is_uint5(imm8)); // illegal shift count
if (imm8 == 1) {
@@ -1256,8 +1225,7 @@ void Assembler::shr(const Operand& dst, uint8_t imm8) {
}
}
-
-void Assembler::shr_cl(const Operand& dst) {
+void Assembler::shr_cl(Operand dst) {
EnsureSpace ensure_space(this);
EMIT(0xD3);
emit_operand(ebp, dst);
@@ -1272,32 +1240,37 @@ void Assembler::shrd(Register dst, Register src, uint8_t shift) {
EMIT(shift);
}
-void Assembler::shrd_cl(const Operand& dst, Register src) {
+void Assembler::shrd_cl(Operand dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xAD);
emit_operand(src, dst);
}
-void Assembler::sub(const Operand& dst, const Immediate& x) {
+void Assembler::sub(Operand dst, const Immediate& x) {
EnsureSpace ensure_space(this);
emit_arith(5, dst, x);
}
-
-void Assembler::sub(Register dst, const Operand& src) {
+void Assembler::sub(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x2B);
emit_operand(dst, src);
}
-
-void Assembler::sub(const Operand& dst, Register src) {
+void Assembler::sub(Operand dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x29);
emit_operand(src, dst);
}
+void Assembler::sub_sp_32(uint32_t imm) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x81); // using a literal 32-bit immediate.
+ static constexpr Register ireg = Register::from_code<5>();
+ emit_operand(ireg, Operand(esp));
+ emit(imm);
+}
void Assembler::test(Register reg, const Immediate& imm) {
if (imm.is_uint8()) {
@@ -1317,23 +1290,20 @@ void Assembler::test(Register reg, const Immediate& imm) {
emit(imm);
}
-
-void Assembler::test(Register reg, const Operand& op) {
+void Assembler::test(Register reg, Operand op) {
EnsureSpace ensure_space(this);
EMIT(0x85);
emit_operand(reg, op);
}
-
-void Assembler::test_b(Register reg, const Operand& op) {
+void Assembler::test_b(Register reg, Operand op) {
CHECK(reg.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0x84);
emit_operand(reg, op);
}
-
-void Assembler::test(const Operand& op, const Immediate& imm) {
+void Assembler::test(Operand op, const Immediate& imm) {
if (op.is_reg_only()) {
test(op.reg(), imm);
return;
@@ -1365,7 +1335,7 @@ void Assembler::test_b(Register reg, Immediate imm8) {
}
}
-void Assembler::test_b(const Operand& op, Immediate imm8) {
+void Assembler::test_b(Operand op, Immediate imm8) {
if (op.is_reg_only()) {
test_b(op.reg(), imm8);
return;
@@ -1390,14 +1360,14 @@ void Assembler::test_w(Register reg, Immediate imm16) {
}
}
-void Assembler::test_w(Register reg, const Operand& op) {
+void Assembler::test_w(Register reg, Operand op) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x85);
emit_operand(reg, op);
}
-void Assembler::test_w(const Operand& op, Immediate imm16) {
+void Assembler::test_w(Operand op, Immediate imm16) {
DCHECK(imm16.is_int16() || imm16.is_uint16());
if (op.is_reg_only()) {
test_w(op.reg(), imm16);
@@ -1415,52 +1385,45 @@ void Assembler::xor_(Register dst, int32_t imm32) {
emit_arith(6, Operand(dst), Immediate(imm32));
}
-
-void Assembler::xor_(Register dst, const Operand& src) {
+void Assembler::xor_(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x33);
emit_operand(dst, src);
}
-
-void Assembler::xor_(const Operand& dst, Register src) {
+void Assembler::xor_(Operand dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x31);
emit_operand(src, dst);
}
-
-void Assembler::xor_(const Operand& dst, const Immediate& x) {
+void Assembler::xor_(Operand dst, const Immediate& x) {
EnsureSpace ensure_space(this);
emit_arith(6, dst, x);
}
-
-void Assembler::bt(const Operand& dst, Register src) {
+void Assembler::bt(Operand dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xA3);
emit_operand(src, dst);
}
-
-void Assembler::bts(const Operand& dst, Register src) {
+void Assembler::bts(Operand dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xAB);
emit_operand(src, dst);
}
-
-void Assembler::bsr(Register dst, const Operand& src) {
+void Assembler::bsr(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xBD);
emit_operand(dst, src);
}
-
-void Assembler::bsf(Register dst, const Operand& src) {
+void Assembler::bsf(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xBC);
@@ -1650,13 +1613,12 @@ void Assembler::wasm_call(Address entry, RelocInfo::Mode rmode) {
emit(reinterpret_cast<intptr_t>(entry), rmode);
}
-int Assembler::CallSize(const Operand& adr) {
+int Assembler::CallSize(Operand adr) {
// Call size is 1 (opcode) + adr.len_ (operand).
return 1 + adr.len_;
}
-
-void Assembler::call(const Operand& adr) {
+void Assembler::call(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xFF);
emit_operand(edx, adr);
@@ -1729,8 +1691,7 @@ void Assembler::jmp(byte* entry, RelocInfo::Mode rmode) {
}
}
-
-void Assembler::jmp(const Operand& adr) {
+void Assembler::jmp(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xFF);
emit_operand(esp, adr);
@@ -1854,94 +1815,81 @@ void Assembler::fldln2() {
EMIT(0xED);
}
-
-void Assembler::fld_s(const Operand& adr) {
+void Assembler::fld_s(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xD9);
emit_operand(eax, adr);
}
-
-void Assembler::fld_d(const Operand& adr) {
+void Assembler::fld_d(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xDD);
emit_operand(eax, adr);
}
-
-void Assembler::fstp_s(const Operand& adr) {
+void Assembler::fstp_s(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xD9);
emit_operand(ebx, adr);
}
-
-void Assembler::fst_s(const Operand& adr) {
+void Assembler::fst_s(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xD9);
emit_operand(edx, adr);
}
-
-void Assembler::fstp_d(const Operand& adr) {
+void Assembler::fstp_d(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xDD);
emit_operand(ebx, adr);
}
-
-void Assembler::fst_d(const Operand& adr) {
+void Assembler::fst_d(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xDD);
emit_operand(edx, adr);
}
-
-void Assembler::fild_s(const Operand& adr) {
+void Assembler::fild_s(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xDB);
emit_operand(eax, adr);
}
-
-void Assembler::fild_d(const Operand& adr) {
+void Assembler::fild_d(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xDF);
emit_operand(ebp, adr);
}
-
-void Assembler::fistp_s(const Operand& adr) {
+void Assembler::fistp_s(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xDB);
emit_operand(ebx, adr);
}
-
-void Assembler::fisttp_s(const Operand& adr) {
+void Assembler::fisttp_s(Operand adr) {
DCHECK(IsEnabled(SSE3));
EnsureSpace ensure_space(this);
EMIT(0xDB);
emit_operand(ecx, adr);
}
-
-void Assembler::fisttp_d(const Operand& adr) {
+void Assembler::fisttp_d(Operand adr) {
DCHECK(IsEnabled(SSE3));
EnsureSpace ensure_space(this);
EMIT(0xDD);
emit_operand(ecx, adr);
}
-
-void Assembler::fist_s(const Operand& adr) {
+void Assembler::fist_s(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xDB);
emit_operand(edx, adr);
}
-
-void Assembler::fistp_d(const Operand& adr) {
+void Assembler::fistp_d(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xDF);
emit_operand(edi, adr);
@@ -2034,8 +1982,7 @@ void Assembler::fsub_i(int i) {
emit_farith(0xD8, 0xE0, i);
}
-
-void Assembler::fisub_s(const Operand& adr) {
+void Assembler::fisub_s(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xDA);
emit_operand(esp, adr);
@@ -2211,8 +2158,7 @@ void Assembler::setcc(Condition cc, Register reg) {
EMIT(0xC0 | reg.code());
}
-
-void Assembler::cvttss2si(Register dst, const Operand& src) {
+void Assembler::cvttss2si(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2220,8 +2166,7 @@ void Assembler::cvttss2si(Register dst, const Operand& src) {
emit_operand(dst, src);
}
-
-void Assembler::cvttsd2si(Register dst, const Operand& src) {
+void Assembler::cvttsd2si(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2238,8 +2183,7 @@ void Assembler::cvtsd2si(Register dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::cvtsi2ss(XMMRegister dst, const Operand& src) {
+void Assembler::cvtsi2ss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2247,8 +2191,7 @@ void Assembler::cvtsi2ss(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
+void Assembler::cvtsi2sd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2256,8 +2199,7 @@ void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::cvtss2sd(XMMRegister dst, const Operand& src) {
+void Assembler::cvtss2sd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2265,8 +2207,7 @@ void Assembler::cvtss2sd(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::cvtsd2ss(XMMRegister dst, const Operand& src) {
+void Assembler::cvtsd2ss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2274,14 +2215,14 @@ void Assembler::cvtsd2ss(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-void Assembler::cvtdq2ps(XMMRegister dst, const Operand& src) {
+void Assembler::cvtdq2ps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x5B);
emit_sse_operand(dst, src);
}
-void Assembler::cvttps2dq(XMMRegister dst, const Operand& src) {
+void Assembler::cvttps2dq(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2289,7 +2230,7 @@ void Assembler::cvttps2dq(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-void Assembler::addsd(XMMRegister dst, const Operand& src) {
+void Assembler::addsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2297,8 +2238,7 @@ void Assembler::addsd(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::mulsd(XMMRegister dst, const Operand& src) {
+void Assembler::mulsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2306,8 +2246,7 @@ void Assembler::mulsd(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::subsd(XMMRegister dst, const Operand& src) {
+void Assembler::subsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2315,8 +2254,7 @@ void Assembler::subsd(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::divsd(XMMRegister dst, const Operand& src) {
+void Assembler::divsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2324,8 +2262,7 @@ void Assembler::divsd(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
+void Assembler::xorpd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2333,91 +2270,84 @@ void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::andps(XMMRegister dst, const Operand& src) {
+void Assembler::andps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x54);
emit_sse_operand(dst, src);
}
-
-void Assembler::orps(XMMRegister dst, const Operand& src) {
+void Assembler::orps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x56);
emit_sse_operand(dst, src);
}
-
-void Assembler::xorps(XMMRegister dst, const Operand& src) {
+void Assembler::xorps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x57);
emit_sse_operand(dst, src);
}
-
-void Assembler::addps(XMMRegister dst, const Operand& src) {
+void Assembler::addps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x58);
emit_sse_operand(dst, src);
}
-
-void Assembler::subps(XMMRegister dst, const Operand& src) {
+void Assembler::subps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x5C);
emit_sse_operand(dst, src);
}
-
-void Assembler::mulps(XMMRegister dst, const Operand& src) {
+void Assembler::mulps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x59);
emit_sse_operand(dst, src);
}
-
-void Assembler::divps(XMMRegister dst, const Operand& src) {
+void Assembler::divps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x5E);
emit_sse_operand(dst, src);
}
-void Assembler::rcpps(XMMRegister dst, const Operand& src) {
+void Assembler::rcpps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x53);
emit_sse_operand(dst, src);
}
-void Assembler::rsqrtps(XMMRegister dst, const Operand& src) {
+void Assembler::rsqrtps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x52);
emit_sse_operand(dst, src);
}
-void Assembler::minps(XMMRegister dst, const Operand& src) {
+void Assembler::minps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x5D);
emit_sse_operand(dst, src);
}
-void Assembler::maxps(XMMRegister dst, const Operand& src) {
+void Assembler::maxps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x5F);
emit_sse_operand(dst, src);
}
-void Assembler::cmpps(XMMRegister dst, const Operand& src, int8_t cmp) {
+void Assembler::cmpps(XMMRegister dst, Operand src, int8_t cmp) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xC2);
@@ -2425,7 +2355,7 @@ void Assembler::cmpps(XMMRegister dst, const Operand& src, int8_t cmp) {
EMIT(cmp);
}
-void Assembler::sqrtsd(XMMRegister dst, const Operand& src) {
+void Assembler::sqrtsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2433,7 +2363,7 @@ void Assembler::sqrtsd(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-void Assembler::haddps(XMMRegister dst, const Operand& src) {
+void Assembler::haddps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2458,8 +2388,7 @@ void Assembler::orpd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
+void Assembler::ucomisd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2510,8 +2439,7 @@ void Assembler::movmskps(Register dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::maxsd(XMMRegister dst, const Operand& src) {
+void Assembler::maxsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2519,8 +2447,7 @@ void Assembler::maxsd(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::minsd(XMMRegister dst, const Operand& src) {
+void Assembler::minsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2553,14 +2480,14 @@ void Assembler::movups(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-void Assembler::movups(XMMRegister dst, const Operand& src) {
+void Assembler::movups(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x10);
emit_sse_operand(dst, src);
}
-void Assembler::movups(const Operand& dst, XMMRegister src) {
+void Assembler::movups(Operand dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x11);
@@ -2576,8 +2503,7 @@ void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) {
EMIT(imm8);
}
-
-void Assembler::movdqa(const Operand& dst, XMMRegister src) {
+void Assembler::movdqa(Operand dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2585,8 +2511,7 @@ void Assembler::movdqa(const Operand& dst, XMMRegister src) {
emit_sse_operand(src, dst);
}
-
-void Assembler::movdqa(XMMRegister dst, const Operand& src) {
+void Assembler::movdqa(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2594,8 +2519,7 @@ void Assembler::movdqa(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::movdqu(const Operand& dst, XMMRegister src ) {
+void Assembler::movdqu(Operand dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2603,8 +2527,7 @@ void Assembler::movdqu(const Operand& dst, XMMRegister src ) {
emit_sse_operand(src, dst);
}
-
-void Assembler::movdqu(XMMRegister dst, const Operand& src) {
+void Assembler::movdqu(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2612,8 +2535,7 @@ void Assembler::movdqu(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::prefetch(const Operand& src, int level) {
+void Assembler::prefetch(Operand src, int level) {
DCHECK(is_uint2(level));
EnsureSpace ensure_space(this);
EMIT(0x0F);
@@ -2623,8 +2545,7 @@ void Assembler::prefetch(const Operand& src, int level) {
emit_sse_operand(code, src);
}
-
-void Assembler::movsd(const Operand& dst, XMMRegister src ) {
+void Assembler::movsd(Operand dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0xF2); // double
EMIT(0x0F);
@@ -2632,8 +2553,7 @@ void Assembler::movsd(const Operand& dst, XMMRegister src ) {
emit_sse_operand(src, dst);
}
-
-void Assembler::movsd(XMMRegister dst, const Operand& src) {
+void Assembler::movsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF2); // double
EMIT(0x0F);
@@ -2641,8 +2561,7 @@ void Assembler::movsd(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::movss(const Operand& dst, XMMRegister src ) {
+void Assembler::movss(Operand dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0xF3); // float
EMIT(0x0F);
@@ -2650,8 +2569,7 @@ void Assembler::movss(const Operand& dst, XMMRegister src ) {
emit_sse_operand(src, dst);
}
-
-void Assembler::movss(XMMRegister dst, const Operand& src) {
+void Assembler::movss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF3); // float
EMIT(0x0F);
@@ -2659,8 +2577,7 @@ void Assembler::movss(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::movd(XMMRegister dst, const Operand& src) {
+void Assembler::movd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2668,8 +2585,7 @@ void Assembler::movd(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::movd(const Operand& dst, XMMRegister src) {
+void Assembler::movd(Operand dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2791,7 +2707,7 @@ void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-void Assembler::pshuflw(XMMRegister dst, const Operand& src, uint8_t shuffle) {
+void Assembler::pshuflw(XMMRegister dst, Operand src, uint8_t shuffle) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2800,7 +2716,7 @@ void Assembler::pshuflw(XMMRegister dst, const Operand& src, uint8_t shuffle) {
EMIT(shuffle);
}
-void Assembler::pshufd(XMMRegister dst, const Operand& src, uint8_t shuffle) {
+void Assembler::pshufd(XMMRegister dst, Operand src, uint8_t shuffle) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2809,7 +2725,7 @@ void Assembler::pshufd(XMMRegister dst, const Operand& src, uint8_t shuffle) {
EMIT(shuffle);
}
-void Assembler::pextrb(const Operand& dst, XMMRegister src, int8_t offset) {
+void Assembler::pextrb(Operand dst, XMMRegister src, int8_t offset) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2820,7 +2736,7 @@ void Assembler::pextrb(const Operand& dst, XMMRegister src, int8_t offset) {
EMIT(offset);
}
-void Assembler::pextrw(const Operand& dst, XMMRegister src, int8_t offset) {
+void Assembler::pextrw(Operand dst, XMMRegister src, int8_t offset) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2831,7 +2747,7 @@ void Assembler::pextrw(const Operand& dst, XMMRegister src, int8_t offset) {
EMIT(offset);
}
-void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
+void Assembler::pextrd(Operand dst, XMMRegister src, int8_t offset) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2842,7 +2758,7 @@ void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
EMIT(offset);
}
-void Assembler::insertps(XMMRegister dst, const Operand& src, int8_t offset) {
+void Assembler::insertps(XMMRegister dst, Operand src, int8_t offset) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2853,7 +2769,7 @@ void Assembler::insertps(XMMRegister dst, const Operand& src, int8_t offset) {
EMIT(offset);
}
-void Assembler::pinsrb(XMMRegister dst, const Operand& src, int8_t offset) {
+void Assembler::pinsrb(XMMRegister dst, Operand src, int8_t offset) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2864,7 +2780,7 @@ void Assembler::pinsrb(XMMRegister dst, const Operand& src, int8_t offset) {
EMIT(offset);
}
-void Assembler::pinsrw(XMMRegister dst, const Operand& src, int8_t offset) {
+void Assembler::pinsrw(XMMRegister dst, Operand src, int8_t offset) {
DCHECK(is_uint8(offset));
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2874,7 +2790,7 @@ void Assembler::pinsrw(XMMRegister dst, const Operand& src, int8_t offset) {
EMIT(offset);
}
-void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t offset) {
+void Assembler::pinsrd(XMMRegister dst, Operand src, int8_t offset) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2885,8 +2801,7 @@ void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t offset) {
EMIT(offset);
}
-
-void Assembler::addss(XMMRegister dst, const Operand& src) {
+void Assembler::addss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2894,8 +2809,7 @@ void Assembler::addss(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::subss(XMMRegister dst, const Operand& src) {
+void Assembler::subss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2903,8 +2817,7 @@ void Assembler::subss(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::mulss(XMMRegister dst, const Operand& src) {
+void Assembler::mulss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2912,8 +2825,7 @@ void Assembler::mulss(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::divss(XMMRegister dst, const Operand& src) {
+void Assembler::divss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2921,8 +2833,7 @@ void Assembler::divss(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::sqrtss(XMMRegister dst, const Operand& src) {
+void Assembler::sqrtss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2930,16 +2841,14 @@ void Assembler::sqrtss(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::ucomiss(XMMRegister dst, const Operand& src) {
+void Assembler::ucomiss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x2E);
emit_sse_operand(dst, src);
}
-
-void Assembler::maxss(XMMRegister dst, const Operand& src) {
+void Assembler::maxss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2947,8 +2856,7 @@ void Assembler::maxss(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::minss(XMMRegister dst, const Operand& src) {
+void Assembler::minss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2959,7 +2867,7 @@ void Assembler::minss(XMMRegister dst, const Operand& src) {
// AVX instructions
void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1,
- const Operand& src2) {
+ Operand src2) {
DCHECK(IsEnabled(FMA3));
EnsureSpace ensure_space(this);
emit_vex_prefix(src1, kLIG, k66, k0F38, kW1);
@@ -2967,9 +2875,8 @@ void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1,
emit_sse_operand(dst, src2);
}
-
void Assembler::vfmass(byte op, XMMRegister dst, XMMRegister src1,
- const Operand& src2) {
+ Operand src2) {
DCHECK(IsEnabled(FMA3));
EnsureSpace ensure_space(this);
emit_vex_prefix(src1, kLIG, k66, k0F38, kW0);
@@ -2977,37 +2884,29 @@ void Assembler::vfmass(byte op, XMMRegister dst, XMMRegister src1,
emit_sse_operand(dst, src2);
}
-
-void Assembler::vsd(byte op, XMMRegister dst, XMMRegister src1,
- const Operand& src2) {
+void Assembler::vsd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(op, dst, src1, src2, kF2, k0F, kWIG);
}
-
-void Assembler::vss(byte op, XMMRegister dst, XMMRegister src1,
- const Operand& src2) {
+void Assembler::vss(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(op, dst, src1, src2, kF3, k0F, kWIG);
}
-
-void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1,
- const Operand& src2) {
+void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(op, dst, src1, src2, kNone, k0F, kWIG);
}
-
-void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1,
- const Operand& src2) {
+void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(op, dst, src1, src2, k66, k0F, kWIG);
}
-void Assembler::vcmpps(XMMRegister dst, XMMRegister src1, const Operand& src2,
+void Assembler::vcmpps(XMMRegister dst, XMMRegister src1, Operand src2,
int8_t cmp) {
vps(0xC2, dst, src1, src2);
EMIT(cmp);
}
-void Assembler::vshufps(XMMRegister dst, XMMRegister src1, const Operand& src2,
+void Assembler::vshufps(XMMRegister dst, XMMRegister src1, Operand src2,
byte imm8) {
DCHECK(is_uint8(imm8));
vps(0xC6, dst, src1, src2);
@@ -3050,56 +2949,56 @@ void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int8_t imm8) {
EMIT(imm8);
}
-void Assembler::vpshuflw(XMMRegister dst, const Operand& src, uint8_t shuffle) {
+void Assembler::vpshuflw(XMMRegister dst, Operand src, uint8_t shuffle) {
vinstr(0x70, dst, xmm0, src, kF2, k0F, kWIG);
EMIT(shuffle);
}
-void Assembler::vpshufd(XMMRegister dst, const Operand& src, uint8_t shuffle) {
+void Assembler::vpshufd(XMMRegister dst, Operand src, uint8_t shuffle) {
vinstr(0x70, dst, xmm0, src, k66, k0F, kWIG);
EMIT(shuffle);
}
-void Assembler::vpextrb(const Operand& dst, XMMRegister src, int8_t offset) {
+void Assembler::vpextrb(Operand dst, XMMRegister src, int8_t offset) {
vinstr(0x14, src, xmm0, dst, k66, k0F3A, kWIG);
EMIT(offset);
}
-void Assembler::vpextrw(const Operand& dst, XMMRegister src, int8_t offset) {
+void Assembler::vpextrw(Operand dst, XMMRegister src, int8_t offset) {
vinstr(0x15, src, xmm0, dst, k66, k0F3A, kWIG);
EMIT(offset);
}
-void Assembler::vpextrd(const Operand& dst, XMMRegister src, int8_t offset) {
+void Assembler::vpextrd(Operand dst, XMMRegister src, int8_t offset) {
vinstr(0x16, src, xmm0, dst, k66, k0F3A, kWIG);
EMIT(offset);
}
-void Assembler::vinsertps(XMMRegister dst, XMMRegister src1,
- const Operand& src2, int8_t offset) {
+void Assembler::vinsertps(XMMRegister dst, XMMRegister src1, Operand src2,
+ int8_t offset) {
vinstr(0x21, dst, src1, src2, k66, k0F3A, kWIG);
EMIT(offset);
}
-void Assembler::vpinsrb(XMMRegister dst, XMMRegister src1, const Operand& src2,
+void Assembler::vpinsrb(XMMRegister dst, XMMRegister src1, Operand src2,
int8_t offset) {
vinstr(0x20, dst, src1, src2, k66, k0F3A, kWIG);
EMIT(offset);
}
-void Assembler::vpinsrw(XMMRegister dst, XMMRegister src1, const Operand& src2,
+void Assembler::vpinsrw(XMMRegister dst, XMMRegister src1, Operand src2,
int8_t offset) {
vinstr(0xC4, dst, src1, src2, k66, k0F, kWIG);
EMIT(offset);
}
-void Assembler::vpinsrd(XMMRegister dst, XMMRegister src1, const Operand& src2,
+void Assembler::vpinsrd(XMMRegister dst, XMMRegister src1, Operand src2,
int8_t offset) {
vinstr(0x22, dst, src1, src2, k66, k0F3A, kWIG);
EMIT(offset);
}
-void Assembler::bmi1(byte op, Register reg, Register vreg, const Operand& rm) {
+void Assembler::bmi1(byte op, Register reg, Register vreg, Operand rm) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
emit_vex_prefix(vreg, kLZ, kNone, k0F38, kW0);
@@ -3107,8 +3006,7 @@ void Assembler::bmi1(byte op, Register reg, Register vreg, const Operand& rm) {
emit_operand(reg, rm);
}
-
-void Assembler::tzcnt(Register dst, const Operand& src) {
+void Assembler::tzcnt(Register dst, Operand src) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
EMIT(0xF3);
@@ -3117,8 +3015,7 @@ void Assembler::tzcnt(Register dst, const Operand& src) {
emit_operand(dst, src);
}
-
-void Assembler::lzcnt(Register dst, const Operand& src) {
+void Assembler::lzcnt(Register dst, Operand src) {
DCHECK(IsEnabled(LZCNT));
EnsureSpace ensure_space(this);
EMIT(0xF3);
@@ -3127,8 +3024,7 @@ void Assembler::lzcnt(Register dst, const Operand& src) {
emit_operand(dst, src);
}
-
-void Assembler::popcnt(Register dst, const Operand& src) {
+void Assembler::popcnt(Register dst, Operand src) {
DCHECK(IsEnabled(POPCNT));
EnsureSpace ensure_space(this);
EMIT(0xF3);
@@ -3137,9 +3033,8 @@ void Assembler::popcnt(Register dst, const Operand& src) {
emit_operand(dst, src);
}
-
void Assembler::bmi2(SIMDPrefix pp, byte op, Register reg, Register vreg,
- const Operand& rm) {
+ Operand rm) {
DCHECK(IsEnabled(BMI2));
EnsureSpace ensure_space(this);
emit_vex_prefix(vreg, kLZ, pp, k0F38, kW0);
@@ -3147,8 +3042,7 @@ void Assembler::bmi2(SIMDPrefix pp, byte op, Register reg, Register vreg,
emit_operand(reg, rm);
}
-
-void Assembler::rorx(Register dst, const Operand& src, byte imm8) {
+void Assembler::rorx(Register dst, Operand src, byte imm8) {
DCHECK(IsEnabled(BMI2));
DCHECK(is_uint8(imm8));
Register vreg = Register::from_code<0>(); // VEX.vvvv unused
@@ -3159,7 +3053,7 @@ void Assembler::rorx(Register dst, const Operand& src, byte imm8) {
EMIT(imm8);
}
-void Assembler::sse2_instr(XMMRegister dst, const Operand& src, byte prefix,
+void Assembler::sse2_instr(XMMRegister dst, Operand src, byte prefix,
byte escape, byte opcode) {
EnsureSpace ensure_space(this);
EMIT(prefix);
@@ -3168,7 +3062,7 @@ void Assembler::sse2_instr(XMMRegister dst, const Operand& src, byte prefix,
emit_sse_operand(dst, src);
}
-void Assembler::ssse3_instr(XMMRegister dst, const Operand& src, byte prefix,
+void Assembler::ssse3_instr(XMMRegister dst, Operand src, byte prefix,
byte escape1, byte escape2, byte opcode) {
DCHECK(IsEnabled(SSSE3));
EnsureSpace ensure_space(this);
@@ -3179,7 +3073,7 @@ void Assembler::ssse3_instr(XMMRegister dst, const Operand& src, byte prefix,
emit_sse_operand(dst, src);
}
-void Assembler::sse4_instr(XMMRegister dst, const Operand& src, byte prefix,
+void Assembler::sse4_instr(XMMRegister dst, Operand src, byte prefix,
byte escape1, byte escape2, byte opcode) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
@@ -3190,9 +3084,8 @@ void Assembler::sse4_instr(XMMRegister dst, const Operand& src, byte prefix,
emit_sse_operand(dst, src);
}
-void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1,
- const Operand& src2, SIMDPrefix pp, LeadingOpcode m,
- VexW w) {
+void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
+ SIMDPrefix pp, LeadingOpcode m, VexW w) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(src1, kL128, pp, m, w);
@@ -3200,7 +3093,7 @@ void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1,
emit_sse_operand(dst, src2);
}
-void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
+void Assembler::emit_sse_operand(XMMRegister reg, Operand adr) {
Register ireg = Register::from_code(reg.code());
emit_operand(ireg, adr);
}
@@ -3320,8 +3213,7 @@ void Assembler::emit_arith(int sel, Operand dst, const Immediate& x) {
}
}
-
-void Assembler::emit_operand(Register reg, const Operand& adr) {
+void Assembler::emit_operand(Register reg, Operand adr) {
const unsigned length = adr.len_;
DCHECK_GT(length, 0);
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index d57e3bee71..f4e495c36b 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -233,30 +233,19 @@ enum RoundingMode {
class Immediate BASE_EMBEDDED {
public:
- inline explicit Immediate(int x) {
+ inline explicit Immediate(int x, RelocInfo::Mode rmode = RelocInfo::NONE) {
value_.immediate = x;
- rmode_ = RelocInfo::NONE32;
- }
- inline explicit Immediate(const ExternalReference& ext) {
- value_.immediate = reinterpret_cast<int32_t>(ext.address());
- rmode_ = RelocInfo::EXTERNAL_REFERENCE;
- }
- inline explicit Immediate(Handle<HeapObject> handle) {
- value_.immediate = reinterpret_cast<intptr_t>(handle.address());
- rmode_ = RelocInfo::EMBEDDED_OBJECT;
- }
- inline explicit Immediate(Smi* value) {
- value_.immediate = reinterpret_cast<intptr_t>(value);
- rmode_ = RelocInfo::NONE32;
- }
- inline explicit Immediate(Address addr) {
- value_.immediate = reinterpret_cast<int32_t>(addr);
- rmode_ = RelocInfo::NONE32;
- }
- inline explicit Immediate(Address x, RelocInfo::Mode rmode) {
- value_.immediate = reinterpret_cast<int32_t>(x);
rmode_ = rmode;
}
+ inline explicit Immediate(const ExternalReference& ext)
+ : Immediate(ext.address(), RelocInfo::EXTERNAL_REFERENCE) {}
+ inline explicit Immediate(Handle<HeapObject> handle)
+ : Immediate(handle.address(), RelocInfo::EMBEDDED_OBJECT) {}
+ inline explicit Immediate(Smi* value)
+ : Immediate(reinterpret_cast<intptr_t>(value)) {}
+ inline explicit Immediate(Address addr,
+ RelocInfo::Mode rmode = RelocInfo::NONE)
+ : Immediate(reinterpret_cast<int32_t>(addr), rmode) {}
static Immediate EmbeddedNumber(double number); // Smi or HeapNumber.
static Immediate EmbeddedCode(CodeStub* code);
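The hunk above collapses the Immediate constructors into one primary constructor with a defaulted RelocInfo::Mode plus thin delegating constructors. A minimal, self-contained sketch of that pattern follows; RelocMode and FakeHandle are simplified stand-ins for illustration only, not V8's actual declarations.

#include <cassert>
#include <cstdint>

enum class RelocMode { kNone, kExternalReference, kEmbeddedObject };

struct FakeHandle { std::intptr_t address; };  // stand-in for Handle<HeapObject>

class Imm {
 public:
  // Primary constructor: carries the value and a defaulted relocation mode.
  explicit Imm(std::int32_t x, RelocMode rmode = RelocMode::kNone)
      : value_(x), rmode_(rmode) {}
  // Forwarding constructor: only job is to pick the right reloc mode.
  explicit Imm(FakeHandle h)
      : Imm(static_cast<std::int32_t>(h.address), RelocMode::kEmbeddedObject) {}

  std::int32_t value() const { return value_; }
  RelocMode rmode() const { return rmode_; }

 private:
  std::int32_t value_;
  RelocMode rmode_;
};

int main() {
  Imm plain(42);                // rmode defaults to kNone
  Imm obj(FakeHandle{0x1000});  // forwarding ctor selects kEmbeddedObject
  assert(plain.rmode() == RelocMode::kNone);
  assert(obj.rmode() == RelocMode::kEmbeddedObject);
  return 0;
}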
@@ -333,8 +322,7 @@ enum ScaleFactor {
times_twice_pointer_size = times_8
};
-
-class Operand BASE_EMBEDDED {
+class Operand {
public:
// reg
INLINE(explicit Operand(Register reg)) { set_modrm(3, reg); }
@@ -352,24 +340,22 @@ class Operand BASE_EMBEDDED {
}
// [disp/r]
- INLINE(explicit Operand(Immediate imm));
+ INLINE(explicit Operand(Immediate imm)) {
+ set_modrm(0, ebp);
+ set_dispr(imm.immediate(), imm.rmode_);
+ }
// [base + disp/r]
explicit Operand(Register base, int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE32);
+ RelocInfo::Mode rmode = RelocInfo::NONE);
// [base + index*scale + disp/r]
- explicit Operand(Register base,
- Register index,
- ScaleFactor scale,
- int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE32);
+ explicit Operand(Register base, Register index, ScaleFactor scale,
+ int32_t disp, RelocInfo::Mode rmode = RelocInfo::NONE);
// [index*scale + disp/r]
- explicit Operand(Register index,
- ScaleFactor scale,
- int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE32);
+ explicit Operand(Register index, ScaleFactor scale, int32_t disp,
+ RelocInfo::Mode rmode = RelocInfo::NONE);
static Operand JumpTable(Register index, ScaleFactor scale, Label* table) {
return Operand(index, scale, reinterpret_cast<int32_t>(table),
@@ -429,13 +415,17 @@ class Operand BASE_EMBEDDED {
byte buf_[6];
// The number of bytes in buf_.
- unsigned int len_;
+ uint8_t len_;
// Only valid if len_ > 4.
RelocInfo::Mode rmode_;
+ // TODO(clemensh): Get rid of this friendship, or make Operand immutable.
friend class Assembler;
};
-
+static_assert(sizeof(Operand) <= 2 * kPointerSize,
+ "Operand must be small enough to pass it by value");
+static_assert(IS_TRIVIALLY_COPYABLE(Operand),
+ "Operand must be trivially copyable to pass it by value");
// -----------------------------------------------------------------------------
// A Displacement describes the 32bit immediate field of an instruction which
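The static_asserts added above are what make the header-wide switch from const Operand& to by-value Operand safe: the type stays small and trivially copyable, and the asserts keep it that way. A minimal sketch of the idea, using a FakeOperand stand-in rather than V8's real Operand:

#include <cstdint>
#include <type_traits>

struct FakeOperand {
  std::uint8_t buf[6];  // encoded ModRM/SIB/displacement bytes
  std::uint8_t len;     // number of valid bytes in buf
};

static_assert(sizeof(FakeOperand) <= 2 * sizeof(void*),
              "small enough to pass by value");
static_assert(std::is_trivially_copyable<FakeOperand>::value,
              "trivially copyable, so a by-value copy is just a memcpy");

// An emitter-style function can now take the operand by value; copying it
// costs no more than copying a couple of pointers.
std::uint8_t first_byte(FakeOperand op) { return op.len ? op.buf[0] : 0; }

int main() {
  FakeOperand op{{0xC0, 0, 0, 0, 0, 0}, 1};
  return first_byte(op) == 0xC0 ? 0 : 1;
}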
@@ -529,7 +519,7 @@ class Assembler : public AssemblerBase {
// The isolate argument is unused (and may be nullptr) when skipping flushing.
inline static Address target_address_at(Address pc, Address constant_pool);
inline static void set_target_address_at(
- Isolate* isolate, Address pc, Address constant_pool, Address target,
+ Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address
@@ -539,12 +529,11 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the instruction on x86).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Isolate* isolate, Address instruction_payload, Code* code,
- Address target);
+ Address instruction_payload, Code* code, Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target,
+ Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
static constexpr int kSpecialTargetSize = kPointerSize;
@@ -610,53 +599,54 @@ class Assembler : public AssemblerBase {
void push(const Immediate& x);
void push_imm32(int32_t imm32);
void push(Register src);
- void push(const Operand& src);
+ void push(Operand src);
void pop(Register dst);
- void pop(const Operand& dst);
+ void pop(Operand dst);
void enter(const Immediate& size);
void leave();
// Moves
void mov_b(Register dst, Register src) { mov_b(dst, Operand(src)); }
- void mov_b(Register dst, const Operand& src);
+ void mov_b(Register dst, Operand src);
void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); }
- void mov_b(const Operand& dst, int8_t src) { mov_b(dst, Immediate(src)); }
- void mov_b(const Operand& dst, const Immediate& src);
- void mov_b(const Operand& dst, Register src);
+ void mov_b(Operand dst, int8_t src) { mov_b(dst, Immediate(src)); }
+ void mov_b(Operand dst, const Immediate& src);
+ void mov_b(Operand dst, Register src);
- void mov_w(Register dst, const Operand& src);
- void mov_w(const Operand& dst, int16_t src) { mov_w(dst, Immediate(src)); }
- void mov_w(const Operand& dst, const Immediate& src);
- void mov_w(const Operand& dst, Register src);
+ void mov_w(Register dst, Operand src);
+ void mov_w(Operand dst, int16_t src) { mov_w(dst, Immediate(src)); }
+ void mov_w(Operand dst, const Immediate& src);
+ void mov_w(Operand dst, Register src);
void mov(Register dst, int32_t imm32);
void mov(Register dst, const Immediate& x);
void mov(Register dst, Handle<HeapObject> handle);
- void mov(Register dst, const Operand& src);
+ void mov(Register dst, Operand src);
void mov(Register dst, Register src);
- void mov(const Operand& dst, const Immediate& x);
- void mov(const Operand& dst, Handle<HeapObject> handle);
- void mov(const Operand& dst, Register src);
+ void mov(Operand dst, const Immediate& x);
+ void mov(Operand dst, Handle<HeapObject> handle);
+ void mov(Operand dst, Register src);
+ void mov(Operand dst, Address src, RelocInfo::Mode);
void movsx_b(Register dst, Register src) { movsx_b(dst, Operand(src)); }
- void movsx_b(Register dst, const Operand& src);
+ void movsx_b(Register dst, Operand src);
void movsx_w(Register dst, Register src) { movsx_w(dst, Operand(src)); }
- void movsx_w(Register dst, const Operand& src);
+ void movsx_w(Register dst, Operand src);
void movzx_b(Register dst, Register src) { movzx_b(dst, Operand(src)); }
- void movzx_b(Register dst, const Operand& src);
+ void movzx_b(Register dst, Operand src);
void movzx_w(Register dst, Register src) { movzx_w(dst, Operand(src)); }
- void movzx_w(Register dst, const Operand& src);
+ void movzx_w(Register dst, Operand src);
// Conditional moves
void cmov(Condition cc, Register dst, Register src) {
cmov(cc, dst, Operand(src));
}
- void cmov(Condition cc, Register dst, const Operand& src);
+ void cmov(Condition cc, Register dst, Operand src);
// Flag management.
void cld();
@@ -668,167 +658,170 @@ class Assembler : public AssemblerBase {
// Exchange
void xchg(Register dst, Register src);
- void xchg(Register dst, const Operand& src);
- void xchg_b(Register reg, const Operand& op);
- void xchg_w(Register reg, const Operand& op);
+ void xchg(Register dst, Operand src);
+ void xchg_b(Register reg, Operand op);
+ void xchg_w(Register reg, Operand op);
// Lock prefix
void lock();
// CompareExchange
- void cmpxchg(const Operand& dst, Register src);
- void cmpxchg_b(const Operand& dst, Register src);
- void cmpxchg_w(const Operand& dst, Register src);
+ void cmpxchg(Operand dst, Register src);
+ void cmpxchg_b(Operand dst, Register src);
+ void cmpxchg_w(Operand dst, Register src);
// Memory Fence
void lfence();
+ void pause();
+
// Arithmetics
void adc(Register dst, int32_t imm32);
- void adc(Register dst, const Operand& src);
+ void adc(Register dst, Operand src);
void add(Register dst, Register src) { add(dst, Operand(src)); }
- void add(Register dst, const Operand& src);
- void add(const Operand& dst, Register src);
+ void add(Register dst, Operand src);
+ void add(Operand dst, Register src);
void add(Register dst, const Immediate& imm) { add(Operand(dst), imm); }
- void add(const Operand& dst, const Immediate& x);
+ void add(Operand dst, const Immediate& x);
void and_(Register dst, int32_t imm32);
void and_(Register dst, const Immediate& x);
void and_(Register dst, Register src) { and_(dst, Operand(src)); }
- void and_(Register dst, const Operand& src);
- void and_(const Operand& dst, Register src);
- void and_(const Operand& dst, const Immediate& x);
+ void and_(Register dst, Operand src);
+ void and_(Operand dst, Register src);
+ void and_(Operand dst, const Immediate& x);
void cmpb(Register reg, Immediate imm8) { cmpb(Operand(reg), imm8); }
- void cmpb(const Operand& op, Immediate imm8);
- void cmpb(Register reg, const Operand& op);
- void cmpb(const Operand& op, Register reg);
+ void cmpb(Operand op, Immediate imm8);
+ void cmpb(Register reg, Operand op);
+ void cmpb(Operand op, Register reg);
void cmpb(Register dst, Register src) { cmpb(Operand(dst), src); }
- void cmpb_al(const Operand& op);
- void cmpw_ax(const Operand& op);
- void cmpw(const Operand& dst, Immediate src);
+ void cmpb_al(Operand op);
+ void cmpw_ax(Operand op);
+ void cmpw(Operand dst, Immediate src);
void cmpw(Register dst, Immediate src) { cmpw(Operand(dst), src); }
- void cmpw(Register dst, const Operand& src);
+ void cmpw(Register dst, Operand src);
void cmpw(Register dst, Register src) { cmpw(Operand(dst), src); }
- void cmpw(const Operand& dst, Register src);
+ void cmpw(Operand dst, Register src);
void cmp(Register reg, int32_t imm32);
void cmp(Register reg, Handle<HeapObject> handle);
void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
- void cmp(Register reg, const Operand& op);
+ void cmp(Register reg, Operand op);
void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); }
- void cmp(const Operand& op, Register reg);
- void cmp(const Operand& op, const Immediate& imm);
- void cmp(const Operand& op, Handle<HeapObject> handle);
+ void cmp(Operand op, Register reg);
+ void cmp(Operand op, const Immediate& imm);
+ void cmp(Operand op, Handle<HeapObject> handle);
void dec_b(Register dst);
- void dec_b(const Operand& dst);
+ void dec_b(Operand dst);
void dec(Register dst);
- void dec(const Operand& dst);
+ void dec(Operand dst);
void cdq();
void idiv(Register src) { idiv(Operand(src)); }
- void idiv(const Operand& src);
+ void idiv(Operand src);
void div(Register src) { div(Operand(src)); }
- void div(const Operand& src);
+ void div(Operand src);
// Signed multiply instructions.
void imul(Register src); // edx:eax = eax * src.
void imul(Register dst, Register src) { imul(dst, Operand(src)); }
- void imul(Register dst, const Operand& src); // dst = dst * src.
+ void imul(Register dst, Operand src); // dst = dst * src.
void imul(Register dst, Register src, int32_t imm32); // dst = src * imm32.
- void imul(Register dst, const Operand& src, int32_t imm32);
+ void imul(Register dst, Operand src, int32_t imm32);
void inc(Register dst);
- void inc(const Operand& dst);
+ void inc(Operand dst);
- void lea(Register dst, const Operand& src);
+ void lea(Register dst, Operand src);
// Unsigned multiply instruction.
void mul(Register src); // edx:eax = eax * reg.
void neg(Register dst);
- void neg(const Operand& dst);
+ void neg(Operand dst);
void not_(Register dst);
- void not_(const Operand& dst);
+ void not_(Operand dst);
void or_(Register dst, int32_t imm32);
void or_(Register dst, Register src) { or_(dst, Operand(src)); }
- void or_(Register dst, const Operand& src);
- void or_(const Operand& dst, Register src);
+ void or_(Register dst, Operand src);
+ void or_(Operand dst, Register src);
void or_(Register dst, const Immediate& imm) { or_(Operand(dst), imm); }
- void or_(const Operand& dst, const Immediate& x);
+ void or_(Operand dst, const Immediate& x);
void rcl(Register dst, uint8_t imm8);
void rcr(Register dst, uint8_t imm8);
void ror(Register dst, uint8_t imm8) { ror(Operand(dst), imm8); }
- void ror(const Operand& dst, uint8_t imm8);
+ void ror(Operand dst, uint8_t imm8);
void ror_cl(Register dst) { ror_cl(Operand(dst)); }
- void ror_cl(const Operand& dst);
+ void ror_cl(Operand dst);
void sar(Register dst, uint8_t imm8) { sar(Operand(dst), imm8); }
- void sar(const Operand& dst, uint8_t imm8);
+ void sar(Operand dst, uint8_t imm8);
void sar_cl(Register dst) { sar_cl(Operand(dst)); }
- void sar_cl(const Operand& dst);
+ void sar_cl(Operand dst);
- void sbb(Register dst, const Operand& src);
+ void sbb(Register dst, Operand src);
void shl(Register dst, uint8_t imm8) { shl(Operand(dst), imm8); }
- void shl(const Operand& dst, uint8_t imm8);
+ void shl(Operand dst, uint8_t imm8);
void shl_cl(Register dst) { shl_cl(Operand(dst)); }
- void shl_cl(const Operand& dst);
+ void shl_cl(Operand dst);
void shld(Register dst, Register src, uint8_t shift);
void shld_cl(Register dst, Register src);
void shr(Register dst, uint8_t imm8) { shr(Operand(dst), imm8); }
- void shr(const Operand& dst, uint8_t imm8);
+ void shr(Operand dst, uint8_t imm8);
void shr_cl(Register dst) { shr_cl(Operand(dst)); }
- void shr_cl(const Operand& dst);
+ void shr_cl(Operand dst);
void shrd(Register dst, Register src, uint8_t shift);
void shrd_cl(Register dst, Register src) { shrd_cl(Operand(dst), src); }
- void shrd_cl(const Operand& dst, Register src);
+ void shrd_cl(Operand dst, Register src);
void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); }
- void sub(const Operand& dst, const Immediate& x);
+ void sub(Operand dst, const Immediate& x);
void sub(Register dst, Register src) { sub(dst, Operand(src)); }
- void sub(Register dst, const Operand& src);
- void sub(const Operand& dst, Register src);
+ void sub(Register dst, Operand src);
+ void sub(Operand dst, Register src);
+ void sub_sp_32(uint32_t imm);
void test(Register reg, const Immediate& imm);
void test(Register reg0, Register reg1) { test(reg0, Operand(reg1)); }
- void test(Register reg, const Operand& op);
- void test(const Operand& op, const Immediate& imm);
- void test(const Operand& op, Register reg) { test(reg, op); }
- void test_b(Register reg, const Operand& op);
+ void test(Register reg, Operand op);
+ void test(Operand op, const Immediate& imm);
+ void test(Operand op, Register reg) { test(reg, op); }
+ void test_b(Register reg, Operand op);
void test_b(Register reg, Immediate imm8);
- void test_b(const Operand& op, Immediate imm8);
- void test_b(const Operand& op, Register reg) { test_b(reg, op); }
+ void test_b(Operand op, Immediate imm8);
+ void test_b(Operand op, Register reg) { test_b(reg, op); }
void test_b(Register dst, Register src) { test_b(dst, Operand(src)); }
- void test_w(Register reg, const Operand& op);
+ void test_w(Register reg, Operand op);
void test_w(Register reg, Immediate imm16);
- void test_w(const Operand& op, Immediate imm16);
- void test_w(const Operand& op, Register reg) { test_w(reg, op); }
+ void test_w(Operand op, Immediate imm16);
+ void test_w(Operand op, Register reg) { test_w(reg, op); }
void test_w(Register dst, Register src) { test_w(dst, Operand(src)); }
void xor_(Register dst, int32_t imm32);
void xor_(Register dst, Register src) { xor_(dst, Operand(src)); }
- void xor_(Register dst, const Operand& src);
- void xor_(const Operand& dst, Register src);
+ void xor_(Register dst, Operand src);
+ void xor_(Operand dst, Register src);
void xor_(Register dst, const Immediate& imm) { xor_(Operand(dst), imm); }
- void xor_(const Operand& dst, const Immediate& x);
+ void xor_(Operand dst, const Immediate& x);
// Bit operations.
- void bt(const Operand& dst, Register src);
+ void bt(Operand dst, Register src);
void bts(Register dst, Register src) { bts(Operand(dst), src); }
- void bts(const Operand& dst, Register src);
+ void bts(Operand dst, Register src);
void bsr(Register dst, Register src) { bsr(dst, Operand(src)); }
- void bsr(Register dst, const Operand& src);
+ void bsr(Register dst, Operand src);
void bsf(Register dst, Register src) { bsf(dst, Operand(src)); }
- void bsf(Register dst, const Operand& src);
+ void bsf(Register dst, Operand src);
// Miscellaneous
void hlt();
@@ -857,9 +850,9 @@ class Assembler : public AssemblerBase {
// Calls
void call(Label* L);
void call(byte* entry, RelocInfo::Mode rmode);
- int CallSize(const Operand& adr);
+ int CallSize(Operand adr);
void call(Register reg) { call(Operand(reg)); }
- void call(const Operand& adr);
+ void call(Operand adr);
int CallSize(Handle<Code> code, RelocInfo::Mode mode);
void call(Handle<Code> code, RelocInfo::Mode rmode);
void call(CodeStub* stub);
@@ -870,7 +863,7 @@ class Assembler : public AssemblerBase {
void jmp(Label* L, Label::Distance distance = Label::kFar);
void jmp(byte* entry, RelocInfo::Mode rmode);
void jmp(Register reg) { jmp(Operand(reg)); }
- void jmp(const Operand& adr);
+ void jmp(Operand adr);
void jmp(Handle<Code> code, RelocInfo::Mode rmode);
// Conditional jumps
@@ -890,25 +883,25 @@ class Assembler : public AssemblerBase {
void fldpi();
void fldln2();
- void fld_s(const Operand& adr);
- void fld_d(const Operand& adr);
+ void fld_s(Operand adr);
+ void fld_d(Operand adr);
- void fstp_s(const Operand& adr);
- void fst_s(const Operand& adr);
- void fstp_d(const Operand& adr);
- void fst_d(const Operand& adr);
+ void fstp_s(Operand adr);
+ void fst_s(Operand adr);
+ void fstp_d(Operand adr);
+ void fst_d(Operand adr);
- void fild_s(const Operand& adr);
- void fild_d(const Operand& adr);
+ void fild_s(Operand adr);
+ void fild_d(Operand adr);
- void fist_s(const Operand& adr);
+ void fist_s(Operand adr);
- void fistp_s(const Operand& adr);
- void fistp_d(const Operand& adr);
+ void fistp_s(Operand adr);
+ void fistp_d(Operand adr);
// The fisttp instructions require SSE3.
- void fisttp_s(const Operand& adr);
- void fisttp_d(const Operand& adr);
+ void fisttp_s(Operand adr);
+ void fisttp_d(Operand adr);
void fabs();
void fchs();
@@ -929,7 +922,7 @@ class Assembler : public AssemblerBase {
void fdiv(int i);
void fdiv_i(int i);
- void fisub_s(const Operand& adr);
+ void fisub_s(Operand adr);
void faddp(int i = 1);
void fsubp(int i = 1);
@@ -962,62 +955,62 @@ class Assembler : public AssemblerBase {
// SSE instructions
void addss(XMMRegister dst, XMMRegister src) { addss(dst, Operand(src)); }
- void addss(XMMRegister dst, const Operand& src);
+ void addss(XMMRegister dst, Operand src);
void subss(XMMRegister dst, XMMRegister src) { subss(dst, Operand(src)); }
- void subss(XMMRegister dst, const Operand& src);
+ void subss(XMMRegister dst, Operand src);
void mulss(XMMRegister dst, XMMRegister src) { mulss(dst, Operand(src)); }
- void mulss(XMMRegister dst, const Operand& src);
+ void mulss(XMMRegister dst, Operand src);
void divss(XMMRegister dst, XMMRegister src) { divss(dst, Operand(src)); }
- void divss(XMMRegister dst, const Operand& src);
+ void divss(XMMRegister dst, Operand src);
void sqrtss(XMMRegister dst, XMMRegister src) { sqrtss(dst, Operand(src)); }
- void sqrtss(XMMRegister dst, const Operand& src);
+ void sqrtss(XMMRegister dst, Operand src);
void ucomiss(XMMRegister dst, XMMRegister src) { ucomiss(dst, Operand(src)); }
- void ucomiss(XMMRegister dst, const Operand& src);
+ void ucomiss(XMMRegister dst, Operand src);
void movaps(XMMRegister dst, XMMRegister src);
void movups(XMMRegister dst, XMMRegister src);
- void movups(XMMRegister dst, const Operand& src);
- void movups(const Operand& dst, XMMRegister src);
+ void movups(XMMRegister dst, Operand src);
+ void movups(Operand dst, XMMRegister src);
void shufps(XMMRegister dst, XMMRegister src, byte imm8);
void maxss(XMMRegister dst, XMMRegister src) { maxss(dst, Operand(src)); }
- void maxss(XMMRegister dst, const Operand& src);
+ void maxss(XMMRegister dst, Operand src);
void minss(XMMRegister dst, XMMRegister src) { minss(dst, Operand(src)); }
- void minss(XMMRegister dst, const Operand& src);
+ void minss(XMMRegister dst, Operand src);
- void andps(XMMRegister dst, const Operand& src);
+ void andps(XMMRegister dst, Operand src);
void andps(XMMRegister dst, XMMRegister src) { andps(dst, Operand(src)); }
- void xorps(XMMRegister dst, const Operand& src);
+ void xorps(XMMRegister dst, Operand src);
void xorps(XMMRegister dst, XMMRegister src) { xorps(dst, Operand(src)); }
- void orps(XMMRegister dst, const Operand& src);
+ void orps(XMMRegister dst, Operand src);
void orps(XMMRegister dst, XMMRegister src) { orps(dst, Operand(src)); }
- void addps(XMMRegister dst, const Operand& src);
+ void addps(XMMRegister dst, Operand src);
void addps(XMMRegister dst, XMMRegister src) { addps(dst, Operand(src)); }
- void subps(XMMRegister dst, const Operand& src);
+ void subps(XMMRegister dst, Operand src);
void subps(XMMRegister dst, XMMRegister src) { subps(dst, Operand(src)); }
- void mulps(XMMRegister dst, const Operand& src);
+ void mulps(XMMRegister dst, Operand src);
void mulps(XMMRegister dst, XMMRegister src) { mulps(dst, Operand(src)); }
- void divps(XMMRegister dst, const Operand& src);
+ void divps(XMMRegister dst, Operand src);
void divps(XMMRegister dst, XMMRegister src) { divps(dst, Operand(src)); }
- void rcpps(XMMRegister dst, const Operand& src);
+ void rcpps(XMMRegister dst, Operand src);
void rcpps(XMMRegister dst, XMMRegister src) { rcpps(dst, Operand(src)); }
- void rsqrtps(XMMRegister dst, const Operand& src);
+ void rsqrtps(XMMRegister dst, Operand src);
void rsqrtps(XMMRegister dst, XMMRegister src) { rsqrtps(dst, Operand(src)); }
- void haddps(XMMRegister dst, const Operand& src);
+ void haddps(XMMRegister dst, Operand src);
void haddps(XMMRegister dst, XMMRegister src) { haddps(dst, Operand(src)); }
- void minps(XMMRegister dst, const Operand& src);
+ void minps(XMMRegister dst, Operand src);
void minps(XMMRegister dst, XMMRegister src) { minps(dst, Operand(src)); }
- void maxps(XMMRegister dst, const Operand& src);
+ void maxps(XMMRegister dst, Operand src);
void maxps(XMMRegister dst, XMMRegister src) { maxps(dst, Operand(src)); }
- void cmpps(XMMRegister dst, const Operand& src, int8_t cmp);
+ void cmpps(XMMRegister dst, Operand src, int8_t cmp);
#define SSE_CMP_P(instr, imm8) \
void instr##ps(XMMRegister dst, XMMRegister src) { \
cmpps(dst, Operand(src), imm8); \
} \
- void instr##ps(XMMRegister dst, const Operand& src) { cmpps(dst, src, imm8); }
+ void instr##ps(XMMRegister dst, Operand src) { cmpps(dst, src, imm8); }
SSE_CMP_P(cmpeq, 0x0);
SSE_CMP_P(cmplt, 0x1);
@@ -1027,54 +1020,55 @@ class Assembler : public AssemblerBase {
#undef SSE_CMP_P
// SSE2 instructions
- void cvttss2si(Register dst, const Operand& src);
+ void cvttss2si(Register dst, Operand src);
void cvttss2si(Register dst, XMMRegister src) {
cvttss2si(dst, Operand(src));
}
- void cvttsd2si(Register dst, const Operand& src);
+ void cvttsd2si(Register dst, Operand src);
void cvttsd2si(Register dst, XMMRegister src) {
cvttsd2si(dst, Operand(src));
}
void cvtsd2si(Register dst, XMMRegister src);
void cvtsi2ss(XMMRegister dst, Register src) { cvtsi2ss(dst, Operand(src)); }
- void cvtsi2ss(XMMRegister dst, const Operand& src);
+ void cvtsi2ss(XMMRegister dst, Operand src);
void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); }
- void cvtsi2sd(XMMRegister dst, const Operand& src);
- void cvtss2sd(XMMRegister dst, const Operand& src);
+ void cvtsi2sd(XMMRegister dst, Operand src);
+ void cvtss2sd(XMMRegister dst, Operand src);
void cvtss2sd(XMMRegister dst, XMMRegister src) {
cvtss2sd(dst, Operand(src));
}
- void cvtsd2ss(XMMRegister dst, const Operand& src);
+ void cvtsd2ss(XMMRegister dst, Operand src);
void cvtsd2ss(XMMRegister dst, XMMRegister src) {
cvtsd2ss(dst, Operand(src));
}
void cvtdq2ps(XMMRegister dst, XMMRegister src) {
cvtdq2ps(dst, Operand(src));
}
- void cvtdq2ps(XMMRegister dst, const Operand& src);
+ void cvtdq2ps(XMMRegister dst, Operand src);
void cvttps2dq(XMMRegister dst, XMMRegister src) {
cvttps2dq(dst, Operand(src));
}
- void cvttps2dq(XMMRegister dst, const Operand& src);
+ void cvttps2dq(XMMRegister dst, Operand src);
void addsd(XMMRegister dst, XMMRegister src) { addsd(dst, Operand(src)); }
- void addsd(XMMRegister dst, const Operand& src);
+ void addsd(XMMRegister dst, Operand src);
void subsd(XMMRegister dst, XMMRegister src) { subsd(dst, Operand(src)); }
- void subsd(XMMRegister dst, const Operand& src);
+ void subsd(XMMRegister dst, Operand src);
void mulsd(XMMRegister dst, XMMRegister src) { mulsd(dst, Operand(src)); }
- void mulsd(XMMRegister dst, const Operand& src);
+ void mulsd(XMMRegister dst, Operand src);
void divsd(XMMRegister dst, XMMRegister src) { divsd(dst, Operand(src)); }
- void divsd(XMMRegister dst, const Operand& src);
- void xorpd(XMMRegister dst, XMMRegister src);
+ void divsd(XMMRegister dst, Operand src);
+ void xorpd(XMMRegister dst, XMMRegister src) { xorpd(dst, Operand(src)); }
+ void xorpd(XMMRegister dst, Operand src);
void sqrtsd(XMMRegister dst, XMMRegister src) { sqrtsd(dst, Operand(src)); }
- void sqrtsd(XMMRegister dst, const Operand& src);
+ void sqrtsd(XMMRegister dst, Operand src);
void andpd(XMMRegister dst, XMMRegister src);
void orpd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, XMMRegister src) { ucomisd(dst, Operand(src)); }
- void ucomisd(XMMRegister dst, const Operand& src);
+ void ucomisd(XMMRegister dst, Operand src);
void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
@@ -1085,15 +1079,15 @@ class Assembler : public AssemblerBase {
void cmpltsd(XMMRegister dst, XMMRegister src);
void maxsd(XMMRegister dst, XMMRegister src) { maxsd(dst, Operand(src)); }
- void maxsd(XMMRegister dst, const Operand& src);
+ void maxsd(XMMRegister dst, Operand src);
void minsd(XMMRegister dst, XMMRegister src) { minsd(dst, Operand(src)); }
- void minsd(XMMRegister dst, const Operand& src);
+ void minsd(XMMRegister dst, Operand src);
- void movdqa(XMMRegister dst, const Operand& src);
- void movdqa(const Operand& dst, XMMRegister src);
- void movdqu(XMMRegister dst, const Operand& src);
- void movdqu(const Operand& dst, XMMRegister src);
- void movdq(bool aligned, XMMRegister dst, const Operand& src) {
+ void movdqa(XMMRegister dst, Operand src);
+ void movdqa(Operand dst, XMMRegister src);
+ void movdqu(XMMRegister dst, Operand src);
+ void movdqu(Operand dst, XMMRegister src);
+ void movdq(bool aligned, XMMRegister dst, Operand src) {
if (aligned) {
movdqa(dst, src);
} else {
@@ -1102,16 +1096,15 @@ class Assembler : public AssemblerBase {
}
void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); }
- void movd(XMMRegister dst, const Operand& src);
+ void movd(XMMRegister dst, Operand src);
void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
- void movd(const Operand& dst, XMMRegister src);
+ void movd(Operand dst, XMMRegister src);
void movsd(XMMRegister dst, XMMRegister src) { movsd(dst, Operand(src)); }
- void movsd(XMMRegister dst, const Operand& src);
- void movsd(const Operand& dst, XMMRegister src);
-
+ void movsd(XMMRegister dst, Operand src);
+ void movsd(Operand dst, XMMRegister src);
- void movss(XMMRegister dst, const Operand& src);
- void movss(const Operand& dst, XMMRegister src);
+ void movss(XMMRegister dst, Operand src);
+ void movss(Operand dst, XMMRegister src);
void movss(XMMRegister dst, XMMRegister src) { movss(dst, Operand(src)); }
void extractps(Register dst, XMMRegister src, byte imm8);
@@ -1131,42 +1124,42 @@ class Assembler : public AssemblerBase {
void pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
pshuflw(dst, Operand(src), shuffle);
}
- void pshuflw(XMMRegister dst, const Operand& src, uint8_t shuffle);
+ void pshuflw(XMMRegister dst, Operand src, uint8_t shuffle);
void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
pshufd(dst, Operand(src), shuffle);
}
- void pshufd(XMMRegister dst, const Operand& src, uint8_t shuffle);
+ void pshufd(XMMRegister dst, Operand src, uint8_t shuffle);
void pextrb(Register dst, XMMRegister src, int8_t offset) {
pextrb(Operand(dst), src, offset);
}
- void pextrb(const Operand& dst, XMMRegister src, int8_t offset);
+ void pextrb(Operand dst, XMMRegister src, int8_t offset);
// Use SSE4_1 encoding for pextrw reg, xmm, imm8 for consistency
void pextrw(Register dst, XMMRegister src, int8_t offset) {
pextrw(Operand(dst), src, offset);
}
- void pextrw(const Operand& dst, XMMRegister src, int8_t offset);
+ void pextrw(Operand dst, XMMRegister src, int8_t offset);
void pextrd(Register dst, XMMRegister src, int8_t offset) {
pextrd(Operand(dst), src, offset);
}
- void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
+ void pextrd(Operand dst, XMMRegister src, int8_t offset);
void insertps(XMMRegister dst, XMMRegister src, int8_t offset) {
insertps(dst, Operand(src), offset);
}
- void insertps(XMMRegister dst, const Operand& src, int8_t offset);
+ void insertps(XMMRegister dst, Operand src, int8_t offset);
void pinsrb(XMMRegister dst, Register src, int8_t offset) {
pinsrb(dst, Operand(src), offset);
}
- void pinsrb(XMMRegister dst, const Operand& src, int8_t offset);
+ void pinsrb(XMMRegister dst, Operand src, int8_t offset);
void pinsrw(XMMRegister dst, Register src, int8_t offset) {
pinsrw(dst, Operand(src), offset);
}
- void pinsrw(XMMRegister dst, const Operand& src, int8_t offset);
+ void pinsrw(XMMRegister dst, Operand src, int8_t offset);
void pinsrd(XMMRegister dst, Register src, int8_t offset) {
pinsrd(dst, Operand(src), offset);
}
- void pinsrd(XMMRegister dst, const Operand& src, int8_t offset);
+ void pinsrd(XMMRegister dst, Operand src, int8_t offset);
// AVX instructions
void vfmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1178,13 +1171,13 @@ class Assembler : public AssemblerBase {
void vfmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmadd231sd(dst, src1, Operand(src2));
}
- void vfmadd132sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmadd132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0x99, dst, src1, src2);
}
- void vfmadd213sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmadd213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xa9, dst, src1, src2);
}
- void vfmadd231sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmadd231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xb9, dst, src1, src2);
}
void vfmsub132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1196,13 +1189,13 @@ class Assembler : public AssemblerBase {
void vfmsub231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmsub231sd(dst, src1, Operand(src2));
}
- void vfmsub132sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmsub132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0x9b, dst, src1, src2);
}
- void vfmsub213sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmsub213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xab, dst, src1, src2);
}
- void vfmsub231sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmsub231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xbb, dst, src1, src2);
}
void vfnmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1214,13 +1207,13 @@ class Assembler : public AssemblerBase {
void vfnmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfnmadd231sd(dst, src1, Operand(src2));
}
- void vfnmadd132sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmadd132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0x9d, dst, src1, src2);
}
- void vfnmadd213sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmadd213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xad, dst, src1, src2);
}
- void vfnmadd231sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmadd231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xbd, dst, src1, src2);
}
void vfnmsub132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1232,16 +1225,16 @@ class Assembler : public AssemblerBase {
void vfnmsub231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfnmsub231sd(dst, src1, Operand(src2));
}
- void vfnmsub132sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmsub132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0x9f, dst, src1, src2);
}
- void vfnmsub213sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmsub213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xaf, dst, src1, src2);
}
- void vfnmsub231sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmsub231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xbf, dst, src1, src2);
}
- void vfmasd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+ void vfmasd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
void vfmadd132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmadd132ss(dst, src1, Operand(src2));
@@ -1252,13 +1245,13 @@ class Assembler : public AssemblerBase {
void vfmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmadd231ss(dst, src1, Operand(src2));
}
- void vfmadd132ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmadd132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0x99, dst, src1, src2);
}
- void vfmadd213ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmadd213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xa9, dst, src1, src2);
}
- void vfmadd231ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmadd231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xb9, dst, src1, src2);
}
void vfmsub132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1270,13 +1263,13 @@ class Assembler : public AssemblerBase {
void vfmsub231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmsub231ss(dst, src1, Operand(src2));
}
- void vfmsub132ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmsub132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0x9b, dst, src1, src2);
}
- void vfmsub213ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmsub213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xab, dst, src1, src2);
}
- void vfmsub231ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmsub231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xbb, dst, src1, src2);
}
void vfnmadd132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1288,13 +1281,13 @@ class Assembler : public AssemblerBase {
void vfnmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfnmadd231ss(dst, src1, Operand(src2));
}
- void vfnmadd132ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmadd132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0x9d, dst, src1, src2);
}
- void vfnmadd213ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmadd213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xad, dst, src1, src2);
}
- void vfnmadd231ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmadd231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xbd, dst, src1, src2);
}
void vfnmsub132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1306,101 +1299,101 @@ class Assembler : public AssemblerBase {
void vfnmsub231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfnmsub231ss(dst, src1, Operand(src2));
}
- void vfnmsub132ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmsub132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0x9f, dst, src1, src2);
}
- void vfnmsub213ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmsub213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xaf, dst, src1, src2);
}
- void vfnmsub231ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmsub231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xbf, dst, src1, src2);
}
- void vfmass(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+ void vfmass(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
void vaddsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vaddsd(dst, src1, Operand(src2));
}
- void vaddsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vaddsd(XMMRegister dst, XMMRegister src1, Operand src2) {
vsd(0x58, dst, src1, src2);
}
void vsubsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vsubsd(dst, src1, Operand(src2));
}
- void vsubsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vsubsd(XMMRegister dst, XMMRegister src1, Operand src2) {
vsd(0x5c, dst, src1, src2);
}
void vmulsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vmulsd(dst, src1, Operand(src2));
}
- void vmulsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vmulsd(XMMRegister dst, XMMRegister src1, Operand src2) {
vsd(0x59, dst, src1, src2);
}
void vdivsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vdivsd(dst, src1, Operand(src2));
}
- void vdivsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vdivsd(XMMRegister dst, XMMRegister src1, Operand src2) {
vsd(0x5e, dst, src1, src2);
}
void vmaxsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vmaxsd(dst, src1, Operand(src2));
}
- void vmaxsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vmaxsd(XMMRegister dst, XMMRegister src1, Operand src2) {
vsd(0x5f, dst, src1, src2);
}
void vminsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vminsd(dst, src1, Operand(src2));
}
- void vminsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vminsd(XMMRegister dst, XMMRegister src1, Operand src2) {
vsd(0x5d, dst, src1, src2);
}
- void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+ void vsd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
void vaddss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vaddss(dst, src1, Operand(src2));
}
- void vaddss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vaddss(XMMRegister dst, XMMRegister src1, Operand src2) {
vss(0x58, dst, src1, src2);
}
void vsubss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vsubss(dst, src1, Operand(src2));
}
- void vsubss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vsubss(XMMRegister dst, XMMRegister src1, Operand src2) {
vss(0x5c, dst, src1, src2);
}
void vmulss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vmulss(dst, src1, Operand(src2));
}
- void vmulss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vmulss(XMMRegister dst, XMMRegister src1, Operand src2) {
vss(0x59, dst, src1, src2);
}
void vdivss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vdivss(dst, src1, Operand(src2));
}
- void vdivss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vdivss(XMMRegister dst, XMMRegister src1, Operand src2) {
vss(0x5e, dst, src1, src2);
}
void vmaxss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vmaxss(dst, src1, Operand(src2));
}
- void vmaxss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vmaxss(XMMRegister dst, XMMRegister src1, Operand src2) {
vss(0x5f, dst, src1, src2);
}
void vminss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vminss(dst, src1, Operand(src2));
}
- void vminss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vminss(XMMRegister dst, XMMRegister src1, Operand src2) {
vss(0x5d, dst, src1, src2);
}
- void vss(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+ void vss(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
void vrcpps(XMMRegister dst, XMMRegister src) { vrcpps(dst, Operand(src)); }
- void vrcpps(XMMRegister dst, const Operand& src) {
+ void vrcpps(XMMRegister dst, Operand src) {
vinstr(0x53, dst, xmm0, src, kNone, k0F, kWIG);
}
void vrsqrtps(XMMRegister dst, XMMRegister src) {
vrsqrtps(dst, Operand(src));
}
- void vrsqrtps(XMMRegister dst, const Operand& src) {
+ void vrsqrtps(XMMRegister dst, Operand src) {
vinstr(0x52, dst, xmm0, src, kNone, k0F, kWIG);
}
void vmovaps(XMMRegister dst, XMMRegister src) {
@@ -1409,8 +1402,7 @@ class Assembler : public AssemblerBase {
void vshufps(XMMRegister dst, XMMRegister src1, XMMRegister src2, byte imm8) {
vshufps(dst, src1, Operand(src2), imm8);
}
- void vshufps(XMMRegister dst, XMMRegister src1, const Operand& src2,
- byte imm8);
+ void vshufps(XMMRegister dst, XMMRegister src1, Operand src2, byte imm8);
void vpsllw(XMMRegister dst, XMMRegister src, int8_t imm8);
void vpslld(XMMRegister dst, XMMRegister src, int8_t imm8);
@@ -1422,75 +1414,72 @@ class Assembler : public AssemblerBase {
void vpshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
vpshuflw(dst, Operand(src), shuffle);
}
- void vpshuflw(XMMRegister dst, const Operand& src, uint8_t shuffle);
+ void vpshuflw(XMMRegister dst, Operand src, uint8_t shuffle);
void vpshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
vpshufd(dst, Operand(src), shuffle);
}
- void vpshufd(XMMRegister dst, const Operand& src, uint8_t shuffle);
+ void vpshufd(XMMRegister dst, Operand src, uint8_t shuffle);
void vpextrb(Register dst, XMMRegister src, int8_t offset) {
vpextrb(Operand(dst), src, offset);
}
- void vpextrb(const Operand& dst, XMMRegister src, int8_t offset);
+ void vpextrb(Operand dst, XMMRegister src, int8_t offset);
void vpextrw(Register dst, XMMRegister src, int8_t offset) {
vpextrw(Operand(dst), src, offset);
}
- void vpextrw(const Operand& dst, XMMRegister src, int8_t offset);
+ void vpextrw(Operand dst, XMMRegister src, int8_t offset);
void vpextrd(Register dst, XMMRegister src, int8_t offset) {
vpextrd(Operand(dst), src, offset);
}
- void vpextrd(const Operand& dst, XMMRegister src, int8_t offset);
+ void vpextrd(Operand dst, XMMRegister src, int8_t offset);
void vinsertps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
int8_t offset) {
vinsertps(dst, src1, Operand(src2), offset);
}
- void vinsertps(XMMRegister dst, XMMRegister src1, const Operand& src2,
+ void vinsertps(XMMRegister dst, XMMRegister src1, Operand src2,
int8_t offset);
void vpinsrb(XMMRegister dst, XMMRegister src1, Register src2,
int8_t offset) {
vpinsrb(dst, src1, Operand(src2), offset);
}
- void vpinsrb(XMMRegister dst, XMMRegister src1, const Operand& src2,
- int8_t offset);
+ void vpinsrb(XMMRegister dst, XMMRegister src1, Operand src2, int8_t offset);
void vpinsrw(XMMRegister dst, XMMRegister src1, Register src2,
int8_t offset) {
vpinsrw(dst, src1, Operand(src2), offset);
}
- void vpinsrw(XMMRegister dst, XMMRegister src1, const Operand& src2,
- int8_t offset);
+ void vpinsrw(XMMRegister dst, XMMRegister src1, Operand src2, int8_t offset);
void vpinsrd(XMMRegister dst, XMMRegister src1, Register src2,
int8_t offset) {
vpinsrd(dst, src1, Operand(src2), offset);
}
- void vpinsrd(XMMRegister dst, XMMRegister src1, const Operand& src2,
- int8_t offset);
+ void vpinsrd(XMMRegister dst, XMMRegister src1, Operand src2, int8_t offset);
void vcvtdq2ps(XMMRegister dst, XMMRegister src) {
vcvtdq2ps(dst, Operand(src));
}
- void vcvtdq2ps(XMMRegister dst, const Operand& src) {
+ void vcvtdq2ps(XMMRegister dst, Operand src) {
vinstr(0x5B, dst, xmm0, src, kNone, k0F, kWIG);
}
void vcvttps2dq(XMMRegister dst, XMMRegister src) {
vcvttps2dq(dst, Operand(src));
}
- void vcvttps2dq(XMMRegister dst, const Operand& src) {
+ void vcvttps2dq(XMMRegister dst, Operand src) {
vinstr(0x5B, dst, xmm0, src, kF3, k0F, kWIG);
}
- void vmovdqu(XMMRegister dst, const Operand& src) {
+ void vmovdqu(XMMRegister dst, Operand src) {
vinstr(0x6F, dst, xmm0, src, kF3, k0F, kWIG);
}
- void vmovdqu(const Operand& dst, XMMRegister src) {
+ void vmovdqu(Operand dst, XMMRegister src) {
vinstr(0x7F, src, xmm0, dst, kF3, k0F, kWIG);
}
void vmovd(XMMRegister dst, Register src) { vmovd(dst, Operand(src)); }
- void vmovd(XMMRegister dst, const Operand& src) {
+ void vmovd(XMMRegister dst, Operand src) {
vinstr(0x6E, dst, xmm0, src, k66, k0F, kWIG);
}
void vmovd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
- void vmovd(const Operand& dst, XMMRegister src) {
+ void vmovd(Operand dst, XMMRegister src) {
vinstr(0x7E, src, xmm0, dst, k66, k0F, kWIG);
}
@@ -1498,76 +1487,76 @@ class Assembler : public AssemblerBase {
void andn(Register dst, Register src1, Register src2) {
andn(dst, src1, Operand(src2));
}
- void andn(Register dst, Register src1, const Operand& src2) {
+ void andn(Register dst, Register src1, Operand src2) {
bmi1(0xf2, dst, src1, src2);
}
void bextr(Register dst, Register src1, Register src2) {
bextr(dst, Operand(src1), src2);
}
- void bextr(Register dst, const Operand& src1, Register src2) {
+ void bextr(Register dst, Operand src1, Register src2) {
bmi1(0xf7, dst, src2, src1);
}
void blsi(Register dst, Register src) { blsi(dst, Operand(src)); }
- void blsi(Register dst, const Operand& src) { bmi1(0xf3, ebx, dst, src); }
+ void blsi(Register dst, Operand src) { bmi1(0xf3, ebx, dst, src); }
void blsmsk(Register dst, Register src) { blsmsk(dst, Operand(src)); }
- void blsmsk(Register dst, const Operand& src) { bmi1(0xf3, edx, dst, src); }
+ void blsmsk(Register dst, Operand src) { bmi1(0xf3, edx, dst, src); }
void blsr(Register dst, Register src) { blsr(dst, Operand(src)); }
- void blsr(Register dst, const Operand& src) { bmi1(0xf3, ecx, dst, src); }
+ void blsr(Register dst, Operand src) { bmi1(0xf3, ecx, dst, src); }
void tzcnt(Register dst, Register src) { tzcnt(dst, Operand(src)); }
- void tzcnt(Register dst, const Operand& src);
+ void tzcnt(Register dst, Operand src);
void lzcnt(Register dst, Register src) { lzcnt(dst, Operand(src)); }
- void lzcnt(Register dst, const Operand& src);
+ void lzcnt(Register dst, Operand src);
void popcnt(Register dst, Register src) { popcnt(dst, Operand(src)); }
- void popcnt(Register dst, const Operand& src);
+ void popcnt(Register dst, Operand src);
void bzhi(Register dst, Register src1, Register src2) {
bzhi(dst, Operand(src1), src2);
}
- void bzhi(Register dst, const Operand& src1, Register src2) {
+ void bzhi(Register dst, Operand src1, Register src2) {
bmi2(kNone, 0xf5, dst, src2, src1);
}
void mulx(Register dst1, Register dst2, Register src) {
mulx(dst1, dst2, Operand(src));
}
- void mulx(Register dst1, Register dst2, const Operand& src) {
+ void mulx(Register dst1, Register dst2, Operand src) {
bmi2(kF2, 0xf6, dst1, dst2, src);
}
void pdep(Register dst, Register src1, Register src2) {
pdep(dst, src1, Operand(src2));
}
- void pdep(Register dst, Register src1, const Operand& src2) {
+ void pdep(Register dst, Register src1, Operand src2) {
bmi2(kF2, 0xf5, dst, src1, src2);
}
void pext(Register dst, Register src1, Register src2) {
pext(dst, src1, Operand(src2));
}
- void pext(Register dst, Register src1, const Operand& src2) {
+ void pext(Register dst, Register src1, Operand src2) {
bmi2(kF3, 0xf5, dst, src1, src2);
}
void sarx(Register dst, Register src1, Register src2) {
sarx(dst, Operand(src1), src2);
}
- void sarx(Register dst, const Operand& src1, Register src2) {
+ void sarx(Register dst, Operand src1, Register src2) {
bmi2(kF3, 0xf7, dst, src2, src1);
}
void shlx(Register dst, Register src1, Register src2) {
shlx(dst, Operand(src1), src2);
}
- void shlx(Register dst, const Operand& src1, Register src2) {
+ void shlx(Register dst, Operand src1, Register src2) {
bmi2(k66, 0xf7, dst, src2, src1);
}
void shrx(Register dst, Register src1, Register src2) {
shrx(dst, Operand(src1), src2);
}
- void shrx(Register dst, const Operand& src1, Register src2) {
+ void shrx(Register dst, Operand src1, Register src2) {
bmi2(kF2, 0xf7, dst, src2, src1);
}
void rorx(Register dst, Register src, byte imm8) {
rorx(dst, Operand(src), imm8);
}
- void rorx(Register dst, const Operand& src, byte imm8);
+ void rorx(Register dst, Operand src, byte imm8);
#define PACKED_OP_LIST(V) \
V(and, 0x54) \
@@ -1579,34 +1568,31 @@ class Assembler : public AssemblerBase {
V(div, 0x5e) \
V(max, 0x5f)
-#define AVX_PACKED_OP_DECLARE(name, opcode) \
- void v##name##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
- vps(opcode, dst, src1, Operand(src2)); \
- } \
- void v##name##ps(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
- vps(opcode, dst, src1, src2); \
- } \
- void v##name##pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
- vpd(opcode, dst, src1, Operand(src2)); \
- } \
- void v##name##pd(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
- vpd(opcode, dst, src1, src2); \
+#define AVX_PACKED_OP_DECLARE(name, opcode) \
+ void v##name##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+ vps(opcode, dst, src1, Operand(src2)); \
+ } \
+ void v##name##ps(XMMRegister dst, XMMRegister src1, Operand src2) { \
+ vps(opcode, dst, src1, src2); \
+ } \
+ void v##name##pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+ vpd(opcode, dst, src1, Operand(src2)); \
+ } \
+ void v##name##pd(XMMRegister dst, XMMRegister src1, Operand src2) { \
+ vpd(opcode, dst, src1, src2); \
}
PACKED_OP_LIST(AVX_PACKED_OP_DECLARE);
- void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void vps(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
- void vpd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void vpd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+ void vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
+ void vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
- void vcmpps(XMMRegister dst, XMMRegister src1, const Operand& src2,
- int8_t cmp);
-#define AVX_CMP_P(instr, imm8) \
- void instr##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
- vcmpps(dst, src1, Operand(src2), imm8); \
- } \
- void instr##ps(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
- vcmpps(dst, src1, src2, imm8); \
+ void vcmpps(XMMRegister dst, XMMRegister src1, Operand src2, int8_t cmp);
+#define AVX_CMP_P(instr, imm8) \
+ void instr##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+ vcmpps(dst, src1, Operand(src2), imm8); \
+ } \
+ void instr##ps(XMMRegister dst, XMMRegister src1, Operand src2) { \
+ vcmpps(dst, src1, src2, imm8); \
}
AVX_CMP_P(vcmpeq, 0x0);
@@ -1621,7 +1607,7 @@ class Assembler : public AssemblerBase {
void instruction(XMMRegister dst, XMMRegister src) { \
instruction(dst, Operand(src)); \
} \
- void instruction(XMMRegister dst, const Operand& src) { \
+ void instruction(XMMRegister dst, Operand src) { \
sse2_instr(dst, src, 0x##prefix, 0x##escape, 0x##opcode); \
}
@@ -1632,8 +1618,7 @@ class Assembler : public AssemblerBase {
void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
v##instruction(dst, src1, Operand(src2)); \
} \
- void v##instruction(XMMRegister dst, XMMRegister src1, \
- const Operand& src2) { \
+ void v##instruction(XMMRegister dst, XMMRegister src1, Operand src2) { \
vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0); \
}
@@ -1645,7 +1630,7 @@ class Assembler : public AssemblerBase {
void instruction(XMMRegister dst, XMMRegister src) { \
instruction(dst, Operand(src)); \
} \
- void instruction(XMMRegister dst, const Operand& src) { \
+ void instruction(XMMRegister dst, Operand src) { \
ssse3_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
}
@@ -1657,7 +1642,7 @@ class Assembler : public AssemblerBase {
void instruction(XMMRegister dst, XMMRegister src) { \
instruction(dst, Operand(src)); \
} \
- void instruction(XMMRegister dst, const Operand& src) { \
+ void instruction(XMMRegister dst, Operand src) { \
sse4_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
}
@@ -1669,8 +1654,7 @@ class Assembler : public AssemblerBase {
void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
v##instruction(dst, src1, Operand(src2)); \
} \
- void v##instruction(XMMRegister dst, XMMRegister src1, \
- const Operand& src2) { \
+ void v##instruction(XMMRegister dst, XMMRegister src1, Operand src2) { \
vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape1##escape2, kW0); \
}
@@ -1681,7 +1665,7 @@ class Assembler : public AssemblerBase {
// Prefetch src position into cache level.
// Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
// non-temporal
- void prefetch(const Operand& src, int level);
+ void prefetch(Operand src, int level);
// TODO(lrn): Need SFENCE for movnt?
// Check the code size generated from label to here.
@@ -1735,7 +1719,7 @@ class Assembler : public AssemblerBase {
}
protected:
- void emit_sse_operand(XMMRegister reg, const Operand& adr);
+ void emit_sse_operand(XMMRegister reg, Operand adr);
void emit_sse_operand(XMMRegister dst, XMMRegister src);
void emit_sse_operand(Register dst, XMMRegister src);
void emit_sse_operand(XMMRegister dst, Register src);
@@ -1774,7 +1758,7 @@ class Assembler : public AssemblerBase {
// sel specifies the /n in the modrm byte (see the Intel PRM).
void emit_arith(int sel, Operand dst, const Immediate& x);
- void emit_operand(Register reg, const Operand& adr);
+ void emit_operand(Register reg, Operand adr);
void emit_label(Label* label);
@@ -1800,18 +1784,17 @@ class Assembler : public AssemblerBase {
inline void emit_disp(Label* L, Displacement::Type type);
inline void emit_near_disp(Label* L);
- void sse2_instr(XMMRegister dst, const Operand& src, byte prefix, byte escape,
+ void sse2_instr(XMMRegister dst, Operand src, byte prefix, byte escape,
byte opcode);
- void ssse3_instr(XMMRegister dst, const Operand& src, byte prefix,
- byte escape1, byte escape2, byte opcode);
- void sse4_instr(XMMRegister dst, const Operand& src, byte prefix,
- byte escape1, byte escape2, byte opcode);
- void vinstr(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2,
+ void ssse3_instr(XMMRegister dst, Operand src, byte prefix, byte escape1,
+ byte escape2, byte opcode);
+ void sse4_instr(XMMRegister dst, Operand src, byte prefix, byte escape1,
+ byte escape2, byte opcode);
+ void vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
SIMDPrefix pp, LeadingOpcode m, VexW w);
// Most BMI instructions are similar.
- void bmi1(byte op, Register reg, Register vreg, const Operand& rm);
- void bmi2(SIMDPrefix pp, byte op, Register reg, Register vreg,
- const Operand& rm);
+ void bmi1(byte op, Register reg, Register vreg, Operand rm);
+ void bmi2(SIMDPrefix pp, byte op, Register reg, Register vreg, Operand rm);
// record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 697539713a..bdae590078 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -163,7 +163,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
if (exponent_type() != INTEGER) {
Label fast_power, try_arithmetic_simplification;
__ DoubleToI(exponent, double_exponent, double_scratch,
- TREAT_MINUS_ZERO_AS_ZERO, &try_arithmetic_simplification,
&try_arithmetic_simplification,
&try_arithmetic_simplification);
__ jmp(&int_exponent);
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 8bd6b5f30c..ffe3c9150b 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -35,9 +35,9 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
+ DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICache(isolate, buffer, allocated);
+ Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
@@ -447,8 +447,8 @@ MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
- Assembler::FlushICache(isolate, buffer, allocated);
+ DCHECK(!RelocInfo::RequiresRelocation(desc));
+ Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
// TODO(jkummerow): It would be nice to register this code creation event
// with the PROFILE / GDBJIT system.
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 6ce62e93bb..ad394020e5 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -2564,6 +2564,11 @@ int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
return d.InstructionDecode(buffer, instruction);
}
+int Disassembler::InstructionDecodeForTesting(v8::internal::Vector<char> buffer,
+ byte* instruction) {
+ DisassemblerIA32 d(converter_, true /*crash if unimplemented*/);
+ return d.InstructionDecode(buffer, instruction);
+}
// The IA-32 assembler does not currently use constant pools.
int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
diff --git a/deps/v8/src/ia32/frame-constants-ia32.h b/deps/v8/src/ia32/frame-constants-ia32.h
index b745a19466..a262b92af9 100644
--- a/deps/v8/src/ia32/frame-constants-ia32.h
+++ b/deps/v8/src/ia32/frame-constants-ia32.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_IA32_FRAMES_IA32_H_
-#define V8_IA32_FRAMES_IA32_H_
+#ifndef V8_IA32_FRAME_CONSTANTS_IA32_H_
+#define V8_IA32_FRAME_CONSTANTS_IA32_H_
namespace v8 {
namespace internal {
@@ -50,4 +50,4 @@ class JavaScriptFrameConstants : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_IA32_FRAMES_IA32_H_
+#endif // V8_IA32_FRAME_CONSTANTS_IA32_H_
diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/ia32/interface-descriptors-ia32.cc
index 9edad9a44c..a4c6894ae8 100644
--- a/deps/v8/src/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/ia32/interface-descriptors-ia32.cc
@@ -69,13 +69,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return ebx; }
-void FastNewClosureDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // SharedFunctionInfo, vector, slot index.
- Register registers[] = {ebx, ecx, edx};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return eax; }
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index ebc8b39ab9..81e91f1e4f 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -14,6 +14,7 @@
#include "src/external-reference-table.h"
#include "src/frame-constants.h"
#include "src/frames-inl.h"
+#include "src/instruction-stream.h"
#include "src/runtime/runtime.h"
#include "src/ia32/assembler-ia32-inl.h"
@@ -79,9 +80,7 @@ void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
}
}
-
-void MacroAssembler::CompareRoot(const Operand& with,
- Heap::RootListIndex index) {
+void MacroAssembler::CompareRoot(Operand with, Heap::RootListIndex index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
Handle<Object> object = isolate()->heap()->root_handle(index);
if (object->IsHeapObject()) {
@@ -183,33 +182,17 @@ void TurboAssembler::SlowTruncateToIDelayed(Zone* zone, Register result_reg) {
}
void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
- XMMRegister scratch,
- MinusZeroMode minus_zero_mode,
- Label* lost_precision, Label* is_nan,
- Label* minus_zero, Label::Distance dst) {
+ XMMRegister scratch, Label* lost_precision,
+ Label* is_nan, Label::Distance dst) {
DCHECK(input_reg != scratch);
cvttsd2si(result_reg, Operand(input_reg));
Cvtsi2sd(scratch, Operand(result_reg));
ucomisd(scratch, input_reg);
j(not_equal, lost_precision, dst);
j(parity_even, is_nan, dst);
- if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
- Label done;
- // The integer converted back is equal to the original. We
- // only have to test if we got -0 as an input.
- test(result_reg, Operand(result_reg));
- j(not_zero, &done, Label::kNear);
- movmskpd(result_reg, input_reg);
- // Bit 0 contains the sign of the double in input_reg.
- // If input was positive, we are ok and return 0, otherwise
- // jump to minus_zero.
- and_(result_reg, 1);
- j(not_zero, minus_zero, dst);
- bind(&done);
- }
}
-void TurboAssembler::LoadUint32(XMMRegister dst, const Operand& src) {
+void TurboAssembler::LoadUint32(XMMRegister dst, Operand src) {
Label done;
cmp(src, Immediate(0));
ExternalReference uint32_bias = ExternalReference::address_of_uint32_bias();
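The DoubleToI hunk above drops the MinusZeroMode parameter and keeps only the lossless round-trip check: truncate, convert back, compare, with NaN detected through the parity flag. A rough C++ analogue of that check, valid only for in-range inputs and not the macro-assembler code itself:

// Plain C++ analogue for in-range inputs; the real code uses cvttsd2si,
// Cvtsi2sd and ucomisd rather than these casts.
#include <cmath>
#include <cstdint>
#include <cstdio>

enum class Result { kOk, kLostPrecision, kNaN };

Result DoubleToInt32(double input, int32_t* out) {
  if (std::isnan(input)) return Result::kNaN;           // j(parity_even, is_nan, dst)
  int32_t truncated = static_cast<int32_t>(input);      // cvttsd2si(result_reg, input_reg)
  if (static_cast<double>(truncated) != input)          // Cvtsi2sd + ucomisd round trip
    return Result::kLostPrecision;                      // j(not_equal, lost_precision, dst)
  *out = truncated;
  return Result::kOk;                                   // note: -0.0 now simply yields 0
}

int main() {
  int32_t v = 0;
  std::printf("%d %d\n", DoubleToInt32(3.0, &v) == Result::kOk,
              DoubleToInt32(3.5, &v) == Result::kLostPrecision);  // prints "1 1"
  return 0;
}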
@@ -385,7 +368,7 @@ void MacroAssembler::MaybeDropFrames() {
RelocInfo::CODE_TARGET);
}
-void TurboAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Cvtsi2sd(XMMRegister dst, Operand src) {
xorps(dst, dst);
cvtsi2sd(dst, src);
}
@@ -595,10 +578,11 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
push(Immediate(StackFrame::TypeToMarker(type)));
if (type == StackFrame::INTERNAL) {
push(Immediate(CodeObject()));
- }
- if (emit_debug_code()) {
- cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
- Check(not_equal, AbortReason::kCodeObjectNotProperlyPatched);
+ // Check at runtime that this code object was patched correctly.
+ if (emit_debug_code()) {
+ cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
+ Check(not_equal, AbortReason::kCodeObjectNotProperlyPatched);
+ }
}
}
@@ -862,6 +846,12 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}
+void MacroAssembler::JumpToInstructionStream(const InstructionStream* stream) {
+ Address bytes_address = reinterpret_cast<Address>(stream->bytes());
+ mov(kOffHeapTrampolineRegister, Immediate(bytes_address, RelocInfo::NONE));
+ jmp(kOffHeapTrampolineRegister);
+}
+
void TurboAssembler::PrepareForTailCall(
const ParameterCount& callee_args_count, Register caller_args_count_reg,
Register scratch0, Register scratch1,
@@ -1001,11 +991,27 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
- Label skip_hook;
+ Label skip_hook, call_hook;
+
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(isolate());
+ cmpb(Operand::StaticVariable(debug_is_active), Immediate(0));
+ j(equal, &skip_hook);
+
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
cmpb(Operand::StaticVariable(debug_hook_active), Immediate(0));
- j(equal, &skip_hook);
+ j(not_equal, &call_hook);
+
+ Register scratch = ecx;
+ mov(scratch, FieldOperand(fun, JSFunction::kSharedFunctionInfoOffset));
+ mov(scratch, FieldOperand(scratch, SharedFunctionInfo::kDebugInfoOffset));
+ JumpIfSmi(scratch, &skip_hook);
+ mov(scratch, FieldOperand(scratch, DebugInfo::kFlagsOffset));
+ test(scratch, Immediate(Smi::FromInt(DebugInfo::kBreakAtEntry)));
+ j(zero, &skip_hook);
+
+ bind(&call_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
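The reworked CheckDebugHook above first bails out when the debugger is inactive, still calls the hook whenever debug_hook_on_function_call is set, and otherwise calls it only when the callee's DebugInfo carries the kBreakAtEntry flag. A compact sketch of that decision, with plain booleans standing in for the loads in the diff:

#include <cstdio>

// Boolean stand-ins: debug_is_active and hook_on_function_call mirror the
// external references, has_debug_info and break_at_entry mirror the
// SharedFunctionInfo/DebugInfo loads into the ecx scratch register.
bool ShouldCallDebugHook(bool debug_is_active, bool hook_on_function_call,
                         bool has_debug_info, bool break_at_entry) {
  if (!debug_is_active) return false;      // cmpb(debug_is_active, 0); j(equal, &skip_hook)
  if (hook_on_function_call) return true;  // j(not_equal, &call_hook)
  if (!has_debug_info) return false;       // JumpIfSmi(scratch, &skip_hook)
  return break_at_entry;                   // test(scratch, kBreakAtEntry); j(zero, &skip_hook)
}

int main() {
  std::printf("%d %d %d\n",
              ShouldCallDebugHook(true, true, false, false),   // 1: hook flag set
              ShouldCallDebugHook(true, false, true, true),    // 1: break-at-entry
              ShouldCallDebugHook(false, true, true, true));   // 0: debugger inactive
  return 0;
}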
@@ -1064,6 +1070,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
mov(ecx, FieldOperand(function, JSFunction::kCodeOffset));
add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
@@ -1104,14 +1111,6 @@ void MacroAssembler::InvokeFunction(Register fun,
InvokeFunctionCode(edi, no_reg, expected, actual, flag);
}
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag) {
- Move(edi, function);
- InvokeFunction(edi, expected, actual, flag);
-}
-
void MacroAssembler::LoadGlobalProxy(Register dst) {
mov(dst, NativeContextOperand());
mov(dst, ContextOperand(dst, Context::GLOBAL_PROXY_INDEX));
@@ -1159,17 +1158,14 @@ void TurboAssembler::Move(Register dst, Register src) {
}
void TurboAssembler::Move(Register dst, const Immediate& x) {
- if (!x.is_heap_object_request() && x.is_zero() &&
- RelocInfo::IsNone(x.rmode())) {
+ if (!x.is_heap_object_request() && x.is_zero()) {
xor_(dst, dst); // Shorter than mov of 32-bit immediate 0.
} else {
mov(dst, x);
}
}
-void TurboAssembler::Move(const Operand& dst, const Immediate& x) {
- mov(dst, x);
-}
+void TurboAssembler::Move(Operand dst, const Immediate& x) { mov(dst, x); }
void TurboAssembler::Move(Register dst, Handle<HeapObject> object) {
mov(dst, object);
@@ -1224,7 +1220,9 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
push(eax);
Move(eax, Immediate(lower));
movd(dst, Operand(eax));
- Move(eax, Immediate(upper));
+ if (upper != lower) {
+ Move(eax, Immediate(upper));
+ }
pinsrd(dst, Operand(eax), 1);
pop(eax);
} else {
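The Move(XMMRegister, uint64_t) hunk above skips reloading eax with the upper half when both 32-bit halves of the constant are equal, since eax already holds the value pinsrd needs. An illustrative check of the condition, with made-up values:

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t src = 0x0000002A0000002Aull;  // both 32-bit halves are 42
  uint32_t lower = static_cast<uint32_t>(src);
  uint32_t upper = static_cast<uint32_t>(src >> 32);
  // After "Move(eax, Immediate(lower)); movd(dst, eax)", eax already holds the
  // pattern pinsrd needs for the high lane whenever upper == lower, so the
  // second "Move(eax, Immediate(upper))" can be skipped.
  bool second_load_needed = (upper != lower);
  std::printf("lower=%u upper=%u reload_eax=%d\n", lower, upper, second_load_needed);
  return 0;
}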
@@ -1236,8 +1234,7 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
}
}
-void TurboAssembler::Pshuflw(XMMRegister dst, const Operand& src,
- uint8_t shuffle) {
+void TurboAssembler::Pshuflw(XMMRegister dst, Operand src, uint8_t shuffle) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vpshuflw(dst, src, shuffle);
@@ -1246,8 +1243,7 @@ void TurboAssembler::Pshuflw(XMMRegister dst, const Operand& src,
}
}
-void TurboAssembler::Pshufd(XMMRegister dst, const Operand& src,
- uint8_t shuffle) {
+void TurboAssembler::Pshufd(XMMRegister dst, Operand src, uint8_t shuffle) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vpshufd(dst, src, shuffle);
@@ -1256,7 +1252,7 @@ void TurboAssembler::Pshufd(XMMRegister dst, const Operand& src,
}
}
-void TurboAssembler::Psignb(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Psignb(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vpsignb(dst, dst, src);
@@ -1270,7 +1266,7 @@ void TurboAssembler::Psignb(XMMRegister dst, const Operand& src) {
UNREACHABLE();
}
-void TurboAssembler::Psignw(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Psignw(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vpsignw(dst, dst, src);
@@ -1284,7 +1280,7 @@ void TurboAssembler::Psignw(XMMRegister dst, const Operand& src) {
UNREACHABLE();
}
-void TurboAssembler::Psignd(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Psignd(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vpsignd(dst, dst, src);
@@ -1298,7 +1294,7 @@ void TurboAssembler::Psignd(XMMRegister dst, const Operand& src) {
UNREACHABLE();
}
-void TurboAssembler::Pshufb(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Pshufb(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vpshufb(dst, dst, src);
@@ -1360,7 +1356,7 @@ void TurboAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
movd(dst, xmm0);
}
-void TurboAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8,
+void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8,
bool is_64_bits) {
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope sse_scope(this, SSE4_1);
@@ -1388,7 +1384,7 @@ void TurboAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8,
}
}
-void TurboAssembler::Lzcnt(Register dst, const Operand& src) {
+void TurboAssembler::Lzcnt(Register dst, Operand src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
lzcnt(dst, src);
@@ -1402,7 +1398,7 @@ void TurboAssembler::Lzcnt(Register dst, const Operand& src) {
xor_(dst, Immediate(31)); // for x in [0..31], 31^x == 31-x.
}
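The Lzcnt fallback above uses bsr followed by xor with 31, relying on the identity quoted in the comment. A quick standalone verification of that identity:

#include <cstdio>

int main() {
  // 31 is 0b11111, so xoring any 5-bit value flips exactly its set bits,
  // which is the same as subtracting it from 31 (no borrows are possible).
  for (int x = 0; x <= 31; ++x) {
    if ((31 ^ x) != (31 - x)) {
      std::printf("identity fails at %d\n", x);
      return 1;
    }
  }
  std::printf("31 ^ x == 31 - x for every x in [0, 31]\n");
  return 0;
}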
-void TurboAssembler::Tzcnt(Register dst, const Operand& src) {
+void TurboAssembler::Tzcnt(Register dst, Operand src) {
if (CpuFeatures::IsSupported(BMI1)) {
CpuFeatureScope scope(this, BMI1);
tzcnt(dst, src);
@@ -1415,7 +1411,7 @@ void TurboAssembler::Tzcnt(Register dst, const Operand& src) {
bind(&not_zero_src);
}
-void TurboAssembler::Popcnt(Register dst, const Operand& src) {
+void TurboAssembler::Popcnt(Register dst, Operand src) {
if (CpuFeatures::IsSupported(POPCNT)) {
CpuFeatureScope scope(this, POPCNT);
popcnt(dst, src);
@@ -1548,6 +1544,59 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
}
}
+void TurboAssembler::RetpolineCall(Register reg) {
+ Label setup_return, setup_target, inner_indirect_branch, capture_spec;
+
+ jmp(&setup_return); // Jump past the entire retpoline below.
+
+ bind(&inner_indirect_branch);
+ call(&setup_target);
+
+ bind(&capture_spec);
+ pause();
+ jmp(&capture_spec);
+
+ bind(&setup_target);
+ mov(Operand(esp, 0), reg);
+ ret(0);
+
+ bind(&setup_return);
+ call(&inner_indirect_branch); // Callee will return after this instruction.
+}
+
+void TurboAssembler::RetpolineCall(Address destination, RelocInfo::Mode rmode) {
+ Label setup_return, setup_target, inner_indirect_branch, capture_spec;
+
+ jmp(&setup_return); // Jump past the entire retpoline below.
+
+ bind(&inner_indirect_branch);
+ call(&setup_target);
+
+ bind(&capture_spec);
+ pause();
+ jmp(&capture_spec);
+
+ bind(&setup_target);
+ mov(Operand(esp, 0), destination, rmode);
+ ret(0);
+
+ bind(&setup_return);
+ call(&inner_indirect_branch); // Callee will return after this instruction.
+}
+
+void TurboAssembler::RetpolineJump(Register reg) {
+ Label setup_target, capture_spec;
+
+ call(&setup_target);
+
+ bind(&capture_spec);
+ pause();
+ jmp(&capture_spec);
+
+ bind(&setup_target);
+ mov(Operand(esp, 0), reg);
+ ret(0);
+}
#ifdef DEBUG
bool AreAliased(Register reg1,
@@ -1596,6 +1645,22 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
j(cc, condition_met, condition_met_distance);
}
+void TurboAssembler::ComputeCodeStartAddress(Register dst) {
+ // In order to get the address of the current instruction, we first need
+ // to use a call and then use a pop, thus pushing the return address to
+ // the stack and then popping it into the register.
+ Label current;
+ call(&current);
+ int pc = pc_offset();
+ bind(&current);
+ pop(dst);
+ if (pc != 0) {
+ sub(dst, Immediate(pc));
+ }
+}
+
+void TurboAssembler::ResetSpeculationPoisonRegister() { UNREACHABLE(); }
+
} // namespace internal
} // namespace v8
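ComputeCodeStartAddress, added above, recovers the start of the current instruction stream with a call/pop pair: the call pushes the address of the label bound right after it, and subtracting that label's recorded pc offset from the popped value yields the code start. A small arithmetic sketch with made-up addresses:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t code_start = 0x1000;   // hypothetical start of the instruction stream
  uint32_t label_offset = 0x25;   // pc_offset() recorded just before bind(&current)
  // call(&current) pushes the address of the label, i.e. code_start + label_offset;
  // pop(dst) retrieves it, and subtracting the known offset recovers code_start.
  uint32_t popped = code_start + label_offset;
  uint32_t recovered = popped - label_offset;
  std::printf("recovered code start: 0x%x\n", recovered);  // 0x1000
  return 0;
}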
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 6242333847..ce299ba5a7 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -20,12 +20,15 @@ constexpr Register kReturnRegister2 = edi;
constexpr Register kJSFunctionRegister = edi;
constexpr Register kContextRegister = esi;
constexpr Register kAllocateSizeRegister = edx;
+constexpr Register kSpeculationPoisonRegister = ebx;
constexpr Register kInterpreterAccumulatorRegister = eax;
-constexpr Register kInterpreterBytecodeOffsetRegister = ecx;
+constexpr Register kInterpreterBytecodeOffsetRegister = edx;
constexpr Register kInterpreterBytecodeArrayRegister = edi;
constexpr Register kInterpreterDispatchTableRegister = esi;
constexpr Register kJavaScriptCallArgCountRegister = eax;
+constexpr Register kJavaScriptCallCodeStartRegister = ecx;
constexpr Register kJavaScriptCallNewTargetRegister = edx;
+constexpr Register kOffHeapTrampolineRegister = ecx;
constexpr Register kRuntimeCallFunctionRegister = ebx;
constexpr Register kRuntimeCallArgCountRegister = eax;
@@ -100,7 +103,7 @@ class TurboAssembler : public Assembler {
// Move if the registers are not identical.
void Move(Register target, Register source);
- void Move(const Operand& dst, const Immediate& x);
+ void Move(Operand dst, const Immediate& x);
// Move an immediate into an XMM register.
void Move(XMMRegister dst, uint32_t src);
@@ -113,6 +116,11 @@ class TurboAssembler : public Assembler {
void Call(Handle<Code> target, RelocInfo::Mode rmode) { call(target, rmode); }
void Call(Label* target) { call(target); }
+ void RetpolineCall(Register reg);
+ void RetpolineCall(Address destination, RelocInfo::Mode rmode);
+
+ void RetpolineJump(Register reg);
+
void CallForDeoptimization(Address target, RelocInfo::Mode rmode) {
call(target, rmode);
}
@@ -180,13 +188,13 @@ class TurboAssembler : public Assembler {
void Prologue();
void Lzcnt(Register dst, Register src) { Lzcnt(dst, Operand(src)); }
- void Lzcnt(Register dst, const Operand& src);
+ void Lzcnt(Register dst, Operand src);
void Tzcnt(Register dst, Register src) { Tzcnt(dst, Operand(src)); }
- void Tzcnt(Register dst, const Operand& src);
+ void Tzcnt(Register dst, Operand src);
void Popcnt(Register dst, Register src) { Popcnt(dst, Operand(src)); }
- void Popcnt(Register dst, const Operand& src);
+ void Popcnt(Register dst, Operand src);
void Ret();
@@ -197,11 +205,11 @@ class TurboAssembler : public Assembler {
void Pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
Pshuflw(dst, Operand(src), shuffle);
}
- void Pshuflw(XMMRegister dst, const Operand& src, uint8_t shuffle);
+ void Pshuflw(XMMRegister dst, Operand src, uint8_t shuffle);
void Pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
Pshufd(dst, Operand(src), shuffle);
}
- void Pshufd(XMMRegister dst, const Operand& src, uint8_t shuffle);
+ void Pshufd(XMMRegister dst, Operand src, uint8_t shuffle);
// SSE/SSE2 instructions with AVX version.
#define AVX_OP2_WITH_TYPE(macro_name, name, dst_type, src_type) \
@@ -214,12 +222,12 @@ class TurboAssembler : public Assembler {
} \
}
- AVX_OP2_WITH_TYPE(Movdqu, movdqu, XMMRegister, const Operand&)
- AVX_OP2_WITH_TYPE(Movdqu, movdqu, const Operand&, XMMRegister)
+ AVX_OP2_WITH_TYPE(Movdqu, movdqu, XMMRegister, Operand)
+ AVX_OP2_WITH_TYPE(Movdqu, movdqu, Operand, XMMRegister)
AVX_OP2_WITH_TYPE(Movd, movd, XMMRegister, Register)
- AVX_OP2_WITH_TYPE(Movd, movd, XMMRegister, const Operand&)
+ AVX_OP2_WITH_TYPE(Movd, movd, XMMRegister, Operand)
AVX_OP2_WITH_TYPE(Movd, movd, Register, XMMRegister)
- AVX_OP2_WITH_TYPE(Movd, movd, const Operand&, XMMRegister)
+ AVX_OP2_WITH_TYPE(Movd, movd, Operand, XMMRegister)
#undef AVX_OP2_WITH_TYPE
@@ -236,27 +244,29 @@ class TurboAssembler : public Assembler {
}
#define AVX_OP3_XO(macro_name, name) \
AVX_OP3_WITH_TYPE(macro_name, name, XMMRegister, XMMRegister) \
- AVX_OP3_WITH_TYPE(macro_name, name, XMMRegister, const Operand&)
+ AVX_OP3_WITH_TYPE(macro_name, name, XMMRegister, Operand)
AVX_OP3_XO(Pcmpeqd, pcmpeqd)
AVX_OP3_XO(Psubb, psubb)
AVX_OP3_XO(Psubw, psubw)
AVX_OP3_XO(Psubd, psubd)
AVX_OP3_XO(Pxor, pxor)
+ AVX_OP3_XO(Xorps, xorps)
+ AVX_OP3_XO(Xorpd, xorpd)
#undef AVX_OP3_XO
#undef AVX_OP3_WITH_TYPE
// Non-SSE2 instructions.
void Pshufb(XMMRegister dst, XMMRegister src) { Pshufb(dst, Operand(src)); }
- void Pshufb(XMMRegister dst, const Operand& src);
+ void Pshufb(XMMRegister dst, Operand src);
void Psignb(XMMRegister dst, XMMRegister src) { Psignb(dst, Operand(src)); }
- void Psignb(XMMRegister dst, const Operand& src);
+ void Psignb(XMMRegister dst, Operand src);
void Psignw(XMMRegister dst, XMMRegister src) { Psignw(dst, Operand(src)); }
- void Psignw(XMMRegister dst, const Operand& src);
+ void Psignw(XMMRegister dst, Operand src);
void Psignd(XMMRegister dst, XMMRegister src) { Psignd(dst, Operand(src)); }
- void Psignd(XMMRegister dst, const Operand& src);
+ void Psignd(XMMRegister dst, Operand src);
void Pextrb(Register dst, XMMRegister src, int8_t imm8);
void Pextrw(Register dst, XMMRegister src, int8_t imm8);
@@ -265,27 +275,27 @@ class TurboAssembler : public Assembler {
bool is_64_bits = false) {
Pinsrd(dst, Operand(src), imm8, is_64_bits);
}
- void Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8,
+ void Pinsrd(XMMRegister dst, Operand src, int8_t imm8,
bool is_64_bits = false);
void LoadUint32(XMMRegister dst, Register src) {
LoadUint32(dst, Operand(src));
}
- void LoadUint32(XMMRegister dst, const Operand& src);
+ void LoadUint32(XMMRegister dst, Operand src);
// Expression support
// cvtsi2sd instruction only writes to the low 64-bit of dst register, which
// hinders register renaming and makes dependence chains longer. So we use
// xorps to clear the dst register before cvtsi2sd to solve this issue.
void Cvtsi2sd(XMMRegister dst, Register src) { Cvtsi2sd(dst, Operand(src)); }
- void Cvtsi2sd(XMMRegister dst, const Operand& src);
+ void Cvtsi2sd(XMMRegister dst, Operand src);
void Cvtui2ss(XMMRegister dst, Register src, Register tmp);
void SlowTruncateToIDelayed(Zone* zone, Register result_reg);
void Push(Register src) { push(src); }
- void Push(const Operand& src) { push(src); }
+ void Push(Operand src) { push(src); }
void Push(Immediate value) { push(value); }
void Push(Handle<HeapObject> handle) { push(Immediate(handle)); }
void Push(Smi* smi) { Push(Immediate(smi)); }
@@ -320,6 +330,12 @@ class TurboAssembler : public Assembler {
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
+ // Compute the start of the generated instruction stream from the current PC.
+ // This is an alternative to embedding the {CodeObject} handle as a reference.
+ void ComputeCodeStartAddress(Register dst);
+
+ void ResetSpeculationPoisonRegister();
+
private:
bool has_frame_ = false;
Isolate* const isolate_;
@@ -341,7 +357,7 @@ class MacroAssembler : public TurboAssembler {
mov(dst, Immediate(x));
}
}
- void Set(const Operand& dst, int32_t x) { mov(dst, Immediate(x)); }
+ void Set(Operand dst, int32_t x) { mov(dst, Immediate(x)); }
// Operations on roots in the root-array.
void LoadRoot(Register destination, Heap::RootListIndex index);
@@ -349,7 +365,7 @@ class MacroAssembler : public TurboAssembler {
// These methods can only be used with constant roots (i.e. non-writable
// and not in new space).
void CompareRoot(Register with, Heap::RootListIndex index);
- void CompareRoot(const Operand& with, Heap::RootListIndex index);
+ void CompareRoot(Operand with, Heap::RootListIndex index);
void PushRoot(Heap::RootListIndex index);
// Compare the object in a register to a value and jump if they are equal.
@@ -358,8 +374,7 @@ class MacroAssembler : public TurboAssembler {
CompareRoot(with, index);
j(equal, if_equal, if_equal_distance);
}
- void JumpIfRoot(const Operand& with, Heap::RootListIndex index,
- Label* if_equal,
+ void JumpIfRoot(Operand with, Heap::RootListIndex index, Label* if_equal,
Label::Distance if_equal_distance = Label::kFar) {
CompareRoot(with, index);
j(equal, if_equal, if_equal_distance);
@@ -372,7 +387,7 @@ class MacroAssembler : public TurboAssembler {
CompareRoot(with, index);
j(not_equal, if_not_equal, if_not_equal_distance);
}
- void JumpIfNotRoot(const Operand& with, Heap::RootListIndex index,
+ void JumpIfNotRoot(Operand with, Heap::RootListIndex index,
Label* if_not_equal,
Label::Distance if_not_equal_distance = Label::kFar) {
CompareRoot(with, index);
@@ -443,6 +458,7 @@ class MacroAssembler : public TurboAssembler {
const ParameterCount& actual, InvokeFlag flag);
// On function call, call into the debugger if necessary.
+ // This may clobber ecx.
void CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual);
@@ -455,10 +471,6 @@ class MacroAssembler : public TurboAssembler {
void InvokeFunction(Register function, const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
- void InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag);
-
// Compare object type for heap object.
// Incoming register is heap_object and outgoing register is map.
void CmpObjectType(Register heap_object, InstanceType type, Register map);
@@ -467,8 +479,7 @@ class MacroAssembler : public TurboAssembler {
void CmpInstanceType(Register map, InstanceType type);
void DoubleToI(Register result_reg, XMMRegister input_reg,
- XMMRegister scratch, MinusZeroMode minus_zero_mode,
- Label* lost_precision, Label* is_nan, Label* minus_zero,
+ XMMRegister scratch, Label* lost_precision, Label* is_nan,
Label::Distance dst = Label::kFar);
// Smi tagging support.
@@ -575,6 +586,9 @@ class MacroAssembler : public TurboAssembler {
void JumpToExternalReference(const ExternalReference& ext,
bool builtin_exit_frame = false);
+ // Generates a trampoline to jump to the off-heap instruction stream.
+ void JumpToInstructionStream(const InstructionStream* stream);
+
// ---------------------------------------------------------------------------
// Utilities
@@ -584,7 +598,7 @@ class MacroAssembler : public TurboAssembler {
void Jump(Handle<Code> target, RelocInfo::Mode rmode) { jmp(target, rmode); }
void Pop(Register dst) { pop(dst); }
- void Pop(const Operand& dst) { pop(dst); }
+ void Pop(Operand dst) { pop(dst); }
void PushReturnAddressFrom(Register src) { push(src); }
void PopReturnAddressTo(Register dst) { pop(dst); }
diff --git a/deps/v8/src/ia32/sse-instr.h b/deps/v8/src/ia32/sse-instr.h
index 82cb0e2e58..7996ee50be 100644
--- a/deps/v8/src/ia32/sse-instr.h
+++ b/deps/v8/src/ia32/sse-instr.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SSE_INSTR_H_
-#define V8_SSE_INSTR_H_
+#ifndef V8_IA32_SSE_INSTR_H_
+#define V8_IA32_SSE_INSTR_H_
#define SSE2_INSTRUCTION_LIST(V) \
V(paddb, 66, 0F, FC) \
@@ -60,4 +60,4 @@
V(pmaxud, 66, 0F, 38, 3F) \
V(pmulld, 66, 0F, 38, 40)
-#endif // V8_SSE_INSTR_H_
+#endif // V8_IA32_SSE_INSTR_H_
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index dfd88862bd..9800149ae1 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -300,18 +300,18 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
Comment("out of bounds elements access");
Label return_undefined(this);
- // Negative indices aren't valid array indices (according to
- // the ECMAScript specification), and are stored as properties
- // in V8, not elements. So we cannot handle them here.
- GotoIf(IntPtrLessThan(intptr_index, IntPtrConstant(0)), miss);
-
// Check if we're allowed to handle OOB accesses.
Node* allow_out_of_bounds =
IsSetWord<LoadHandler::AllowOutOfBoundsBits>(handler_word);
GotoIfNot(allow_out_of_bounds, miss);
- // For typed arrays we never lookup elements in the prototype chain.
+ // Negative indices aren't valid array indices (according to
+ // the ECMAScript specification), and are stored as properties
+ // in V8, not elements. So we cannot handle them here, except
+ // in case of typed arrays, where integer indexed properties
+ // aren't looked up in the prototype chain.
GotoIf(IsJSTypedArray(holder), &return_undefined);
+ GotoIf(IntPtrLessThan(intptr_index, IntPtrConstant(0)), miss);
// For all other receivers we need to check that the prototype chain
// doesn't contain any elements.
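The reordered out-of-bounds handling above lets a typed array holder reach return_undefined even for a negative index, while every other receiver still misses on a negative index. The resulting decision order, sketched in plain C++ with stand-in names rather than CSA nodes:

#include <cstdint>

enum class Outcome { kMiss, kReturnUndefined, kCheckPrototypes };

// Stand-in for the out-of-bounds branch in HandleLoadICSmiHandlerCase.
Outcome OobElementLoad(bool allow_out_of_bounds, bool holder_is_typed_array,
                       intptr_t index) {
  if (!allow_out_of_bounds) return Outcome::kMiss;             // GotoIfNot(allow_out_of_bounds, miss)
  if (holder_is_typed_array) return Outcome::kReturnUndefined; // GotoIf(IsJSTypedArray(holder), ...)
  if (index < 0) return Outcome::kMiss;                        // GotoIf(IntPtrLessThan(index, 0), miss)
  return Outcome::kCheckPrototypes;  // fall through to the prototype-chain check
}

int main() {
  return OobElementLoad(true, true, -1) == Outcome::kReturnUndefined ? 0 : 1;
}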
@@ -1350,7 +1350,7 @@ void AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
BIND(&if_smi_hash);
{
- Node* hash = SmiToWord32(properties);
+ Node* hash = SmiToInt32(properties);
Node* encoded_hash =
Word32Shl(hash, Int32Constant(PropertyArray::HashField::kShift));
var_encoded_hash.Bind(encoded_hash);
@@ -1368,7 +1368,7 @@ void AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
Node* length_intptr = ChangeInt32ToIntPtr(
Word32And(length_and_hash_int32,
Int32Constant(PropertyArray::LengthField::kMask)));
- Node* length = WordToParameter(length_intptr, mode);
+ Node* length = IntPtrToParameter(length_intptr, mode);
var_length.Bind(length);
Goto(&extend_store);
}
@@ -1412,11 +1412,11 @@ void AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
// TODO(gsathya): Clean up the type conversions by creating smarter
// helpers that do the correct op based on the mode.
Node* new_capacity_int32 =
- TruncateWordToWord32(ParameterToWord(new_capacity, mode));
+ TruncateIntPtrToInt32(ParameterToIntPtr(new_capacity, mode));
Node* new_length_and_hash_int32 =
Word32Or(var_encoded_hash.value(), new_capacity_int32);
StoreObjectField(new_properties, PropertyArray::kLengthAndHashOffset,
- SmiFromWord32(new_length_and_hash_int32));
+ SmiFromInt32(new_length_and_hash_int32));
StoreObjectField(object, JSObject::kPropertiesOrHashOffset, new_properties);
Comment("] Extend storage");
Goto(&done);
@@ -1614,26 +1614,22 @@ void AccessorAssembler::EmitElementLoad(
SmiUntag(CAST(LoadObjectField(object, JSTypedArray::kLengthOffset)));
GotoIfNot(UintPtrLessThan(intptr_index, length), out_of_bounds);
- // Backing store = external_pointer + base_pointer.
- Node* external_pointer =
- LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset,
- MachineType::Pointer());
- Node* base_pointer =
- LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset);
- Node* backing_store =
- IntPtrAdd(external_pointer, BitcastTaggedToWord(base_pointer));
+ Node* backing_store = LoadFixedTypedArrayBackingStore(CAST(elements));
Label uint8_elements(this), int8_elements(this), uint16_elements(this),
int16_elements(this), uint32_elements(this), int32_elements(this),
- float32_elements(this), float64_elements(this);
+ float32_elements(this), float64_elements(this), bigint64_elements(this),
+ biguint64_elements(this);
Label* elements_kind_labels[] = {
- &uint8_elements, &uint8_elements, &int8_elements,
- &uint16_elements, &int16_elements, &uint32_elements,
- &int32_elements, &float32_elements, &float64_elements};
+ &uint8_elements, &uint8_elements, &int8_elements,
+ &uint16_elements, &int16_elements, &uint32_elements,
+ &int32_elements, &float32_elements, &float64_elements,
+ &bigint64_elements, &biguint64_elements};
int32_t elements_kinds[] = {
- UINT8_ELEMENTS, UINT8_CLAMPED_ELEMENTS, INT8_ELEMENTS,
- UINT16_ELEMENTS, INT16_ELEMENTS, UINT32_ELEMENTS,
- INT32_ELEMENTS, FLOAT32_ELEMENTS, FLOAT64_ELEMENTS};
+ UINT8_ELEMENTS, UINT8_CLAMPED_ELEMENTS, INT8_ELEMENTS,
+ UINT16_ELEMENTS, INT16_ELEMENTS, UINT32_ELEMENTS,
+ INT32_ELEMENTS, FLOAT32_ELEMENTS, FLOAT64_ELEMENTS,
+ BIGINT64_ELEMENTS, BIGUINT64_ELEMENTS};
const size_t kTypedElementsKindCount =
LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND + 1;
@@ -1645,27 +1641,27 @@ void AccessorAssembler::EmitElementLoad(
{
Comment("UINT8_ELEMENTS"); // Handles UINT8_CLAMPED_ELEMENTS too.
Node* element = Load(MachineType::Uint8(), backing_store, intptr_index);
- exit_point->Return(SmiFromWord32(element));
+ exit_point->Return(SmiFromInt32(element));
}
BIND(&int8_elements);
{
Comment("INT8_ELEMENTS");
Node* element = Load(MachineType::Int8(), backing_store, intptr_index);
- exit_point->Return(SmiFromWord32(element));
+ exit_point->Return(SmiFromInt32(element));
}
BIND(&uint16_elements);
{
Comment("UINT16_ELEMENTS");
Node* index = WordShl(intptr_index, IntPtrConstant(1));
Node* element = Load(MachineType::Uint16(), backing_store, index);
- exit_point->Return(SmiFromWord32(element));
+ exit_point->Return(SmiFromInt32(element));
}
BIND(&int16_elements);
{
Comment("INT16_ELEMENTS");
Node* index = WordShl(intptr_index, IntPtrConstant(1));
Node* element = Load(MachineType::Int16(), backing_store, index);
- exit_point->Return(SmiFromWord32(element));
+ exit_point->Return(SmiFromInt32(element));
}
BIND(&uint32_elements);
{
@@ -1697,6 +1693,18 @@ void AccessorAssembler::EmitElementLoad(
var_double_value->Bind(element);
Goto(rebox_double);
}
+ BIND(&bigint64_elements);
+ {
+ Comment("BIGINT64_ELEMENTS");
+ exit_point->Return(LoadFixedTypedArrayElementAsTagged(
+ backing_store, intptr_index, BIGINT64_ELEMENTS, INTPTR_PARAMETERS));
+ }
+ BIND(&biguint64_elements);
+ {
+ Comment("BIGUINT64_ELEMENTS");
+ exit_point->Return(LoadFixedTypedArrayElementAsTagged(
+ backing_store, intptr_index, BIGUINT64_ELEMENTS, INTPTR_PARAMETERS));
+ }
}
}
@@ -1718,13 +1726,13 @@ void AccessorAssembler::BranchIfStrictMode(Node* vector, Node* slot,
LoadObjectField(vector, FeedbackVector::kSharedFunctionInfoOffset);
Node* metadata =
LoadObjectField(sfi, SharedFunctionInfo::kFeedbackMetadataOffset);
- Node* slot_int = SmiToWord32(slot);
+ Node* slot_int = SmiToInt32(slot);
// See VectorICComputer::index().
const int kItemsPerWord = FeedbackMetadata::VectorICComputer::kItemsPerWord;
Node* word_index = Int32Div(slot_int, Int32Constant(kItemsPerWord));
Node* word_offset = Int32Mod(slot_int, Int32Constant(kItemsPerWord));
- Node* data = SmiToWord32(LoadFixedArrayElement(
+ Node* data = SmiToInt32(LoadFixedArrayElement(
metadata, ChangeInt32ToIntPtr(word_index),
FeedbackMetadata::kReservedIndexCount * kPointerSize, INTPTR_PARAMETERS));
// See VectorICComputer::decode().
@@ -1803,10 +1811,12 @@ void AccessorAssembler::GenericElementLoad(Node* receiver, Node* receiver_map,
BIND(&if_oob);
{
Comment("out of bounds");
- // Negative keys can't take the fast OOB path.
- GotoIf(IntPtrLessThan(index, IntPtrConstant(0)), slow);
// Positive OOB indices are effectively the same as hole loads.
- Goto(&if_element_hole);
+ GotoIf(IntPtrGreaterThanOrEqual(index, IntPtrConstant(0)),
+ &if_element_hole);
+ // Negative keys can't take the fast OOB path, except for typed arrays.
+ GotoIfNot(InstanceTypeEqual(instance_type, JS_TYPED_ARRAY_TYPE), slow);
+ Return(UndefinedConstant());
}
BIND(&if_element_hole);
@@ -1977,6 +1987,9 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
// TODO(jkummerow): Consider supporting JSModuleNamespace.
GotoIfNot(InstanceTypeEqual(instance_type, JS_PROXY_TYPE), slow);
+ // Private field/symbol lookup is not supported.
+ GotoIf(IsPrivateSymbol(p->name), slow);
+
direct_exit.ReturnCallStub(
Builtins::CallableFor(isolate(), Builtins::kProxyGetProperty),
p->context, receiver /*holder is the same as receiver*/, p->name,
@@ -2004,7 +2017,7 @@ Node* AccessorAssembler::StubCachePrimaryOffset(Node* name, Node* map) {
// Using only the low bits in 64-bit mode is unlikely to increase the
// risk of collision even if the heap is spread over an area larger than
// 4Gb (and not at all if it isn't).
- Node* map32 = TruncateWordToWord32(BitcastTaggedToWord(map));
+ Node* map32 = TruncateIntPtrToInt32(BitcastTaggedToWord(map));
// Base the offset on a simple combination of name and map.
Node* hash = Int32Add(hash_field, map32);
uint32_t mask = (StubCache::kPrimaryTableSize - 1)
@@ -2016,8 +2029,8 @@ Node* AccessorAssembler::StubCacheSecondaryOffset(Node* name, Node* seed) {
// See v8::internal::StubCache::SecondaryOffset().
// Use the seed from the primary cache in the secondary cache.
- Node* name32 = TruncateWordToWord32(BitcastTaggedToWord(name));
- Node* hash = Int32Sub(TruncateWordToWord32(seed), name32);
+ Node* name32 = TruncateIntPtrToInt32(BitcastTaggedToWord(name));
+ Node* hash = Int32Sub(TruncateIntPtrToInt32(seed), name32);
hash = Int32Add(hash, Int32Constant(StubCache::kSecondaryMagic));
int32_t mask = (StubCache::kSecondaryTableSize - 1)
<< StubCache::kCacheIndexShift;
@@ -2340,9 +2353,9 @@ void AccessorAssembler::LoadGlobalIC_TryPropertyCellCase(
Comment("Load lexical variable");
TNode<IntPtrT> lexical_handler = SmiUntag(CAST(maybe_weak_cell));
TNode<IntPtrT> context_index =
- Signed(DecodeWord<GlobalICNexus::ContextIndexBits>(lexical_handler));
+ Signed(DecodeWord<FeedbackNexus::ContextIndexBits>(lexical_handler));
TNode<IntPtrT> slot_index =
- Signed(DecodeWord<GlobalICNexus::SlotIndexBits>(lexical_handler));
+ Signed(DecodeWord<FeedbackNexus::SlotIndexBits>(lexical_handler));
TNode<Context> context = lazy_context();
TNode<Context> script_context = LoadScriptContext(context, context_index);
TNode<Object> result = LoadContextElement(script_context, slot_index);
@@ -2685,9 +2698,9 @@ void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
Comment("Store lexical variable");
TNode<IntPtrT> lexical_handler = SmiUntag(maybe_weak_cell);
TNode<IntPtrT> context_index =
- Signed(DecodeWord<GlobalICNexus::ContextIndexBits>(lexical_handler));
+ Signed(DecodeWord<FeedbackNexus::ContextIndexBits>(lexical_handler));
TNode<IntPtrT> slot_index =
- Signed(DecodeWord<GlobalICNexus::SlotIndexBits>(lexical_handler));
+ Signed(DecodeWord<FeedbackNexus::SlotIndexBits>(lexical_handler));
TNode<Context> script_context =
LoadScriptContext(CAST(pp->context), context_index);
StoreContextElement(script_context, slot_index, CAST(pp->value));
diff --git a/deps/v8/src/ic/accessor-assembler.h b/deps/v8/src/ic/accessor-assembler.h
index 46376dd6a8..3e4f551c14 100644
--- a/deps/v8/src/ic/accessor-assembler.h
+++ b/deps/v8/src/ic/accessor-assembler.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SRC_IC_ACCESSOR_ASSEMBLER_H_
-#define V8_SRC_IC_ACCESSOR_ASSEMBLER_H_
+#ifndef V8_IC_ACCESSOR_ASSEMBLER_H_
+#define V8_IC_ACCESSOR_ASSEMBLER_H_
#include "src/code-stub-assembler.h"
@@ -335,4 +335,4 @@ class ExitPoint {
} // namespace internal
} // namespace v8
-#endif // V8_SRC_IC_ACCESSOR_ASSEMBLER_H_
+#endif // V8_IC_ACCESSOR_ASSEMBLER_H_
diff --git a/deps/v8/src/ic/binary-op-assembler.h b/deps/v8/src/ic/binary-op-assembler.h
index d7afd7b655..420f66c174 100644
--- a/deps/v8/src/ic/binary-op-assembler.h
+++ b/deps/v8/src/ic/binary-op-assembler.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SRC_IC_BINARY_OP_ASSEMBLER_H_
-#define V8_SRC_IC_BINARY_OP_ASSEMBLER_H_
+#ifndef V8_IC_BINARY_OP_ASSEMBLER_H_
+#define V8_IC_BINARY_OP_ASSEMBLER_H_
#include <functional>
#include "src/code-stub-assembler.h"
@@ -60,4 +60,4 @@ class BinaryOpAssembler : public CodeStubAssembler {
} // namespace internal
} // namespace v8
-#endif // V8_SRC_IC_BINARY_OP_ASSEMBLER_H_
+#endif // V8_IC_BINARY_OP_ASSEMBLER_H_
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index d6fa23611e..83ab9d86b8 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_IC_INL_H_
-#define V8_IC_INL_H_
+#ifndef V8_IC_IC_INL_H_
+#define V8_IC_IC_INL_H_
#include "src/ic/ic.h"
@@ -59,4 +59,4 @@ bool IC::AddressIsDeoptimizedCode(Isolate* isolate, Address address) {
} // namespace internal
} // namespace v8
-#endif // V8_IC_INL_H_
+#endif // V8_IC_IC_INL_H_
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 62a2e7cf59..e6fa0b1ceb 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -58,12 +58,18 @@ const char* GetModifier(KeyedAccessLoadMode mode) {
}
const char* GetModifier(KeyedAccessStoreMode mode) {
- if (mode == STORE_NO_TRANSITION_HANDLE_COW) return ".COW";
- if (mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
- return ".IGNORE_OOB";
+ switch (mode) {
+ case STORE_NO_TRANSITION_HANDLE_COW:
+ return ".COW";
+ case STORE_AND_GROW_NO_TRANSITION_HANDLE_COW:
+ return ".STORE+COW";
+ case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
+ return ".IGNORE_OOB";
+ default:
+ break;
}
- if (IsGrowStoreMode(mode)) return ".GROW";
- return "";
+ DCHECK(!IsCOWHandlingStoreMode(mode));
+ return IsGrowStoreMode(mode) ? ".GROW" : "";
}
} // namespace
@@ -89,12 +95,10 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
const char* modifier = "";
if (IsKeyedLoadIC()) {
- KeyedAccessLoadMode mode =
- casted_nexus<KeyedLoadICNexus>()->GetKeyedAccessLoadMode();
+ KeyedAccessLoadMode mode = nexus()->GetKeyedAccessLoadMode();
modifier = GetModifier(mode);
} else if (IsKeyedStoreIC()) {
- KeyedAccessStoreMode mode =
- casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
+ KeyedAccessStoreMode mode = nexus()->GetKeyedAccessStoreMode();
modifier = GetModifier(mode);
}
@@ -147,13 +151,14 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
#define TRACE_IC(type, name) TraceIC(type, name)
-IC::IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus)
+IC::IC(FrameDepth depth, Isolate* isolate, Handle<FeedbackVector> vector,
+ FeedbackSlot slot)
: isolate_(isolate),
vector_set_(false),
kind_(FeedbackSlotKind::kInvalid),
target_maps_set_(false),
slow_stub_reason_(nullptr),
- nexus_(nexus) {
+ nexus_(vector, slot) {
// To improve the performance of the (much used) IC code, we unfold a few
// levels of the stack frame iteration code. This yields a ~35% speedup when
// running DeltaBlue and a ~25% speedup of gbemu with the '--nouse-ic' flag.
@@ -199,9 +204,8 @@ IC::IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus)
constant_pool_address_ = constant_pool;
}
pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address);
- DCHECK_NOT_NULL(nexus);
- kind_ = nexus->kind();
- state_ = nexus->StateFromFeedback();
+ kind_ = nexus_.kind();
+ state_ = nexus_.StateFromFeedback();
old_state_ = state_;
}
@@ -251,12 +255,12 @@ static void LookupForRead(LookupIterator* it) {
bool IC::ShouldRecomputeHandler(Handle<String> name) {
if (!RecomputeHandlerForName(name)) return false;
- maybe_handler_ = nexus()->FindHandlerForMap(receiver_map());
-
// This is a contextual access, always just update the handler and stay
// monomorphic.
if (IsGlobalIC()) return true;
+ maybe_handler_ = nexus()->FindHandlerForMap(receiver_map());
+
// The current map wasn't handled yet. There's no reason to stay monomorphic,
// *unless* we're moving from a deprecated map to its replacement, or
// to a more general elements kind.
@@ -315,6 +319,13 @@ MaybeHandle<Object> IC::ReferenceError(Handle<Name> name) {
isolate(), NewReferenceError(MessageTemplate::kNotDefined, name), Object);
}
+// static
+void IC::OnFeedbackChanged(Isolate* isolate, FeedbackNexus* nexus,
+ JSFunction* host_function, const char* reason) {
+ FeedbackVector* vector = nexus->vector();
+ FeedbackSlot slot = nexus->slot();
+ OnFeedbackChanged(isolate, vector, slot, host_function, reason);
+}
// static
void IC::OnFeedbackChanged(Isolate* isolate, FeedbackVector* vector,
@@ -385,21 +396,15 @@ bool IC::ConfigureVectorState(IC::State new_state, Handle<Object> key) {
vector_set_ = true;
OnFeedbackChanged(
- isolate(), *vector(), slot(), GetHostFunction(),
+ isolate(), nexus(), GetHostFunction(),
new_state == PREMONOMORPHIC ? "Premonomorphic" : "Megamorphic");
return changed;
}
void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map,
Handle<Object> handler) {
- if (IsLoadGlobalIC()) {
- LoadGlobalICNexus* nexus = casted_nexus<LoadGlobalICNexus>();
- nexus->ConfigureHandlerMode(handler);
-
- } else if (IsStoreGlobalIC()) {
- StoreGlobalICNexus* nexus = casted_nexus<StoreGlobalICNexus>();
- nexus->ConfigureHandlerMode(handler);
-
+ if (IsGlobalIC()) {
+ nexus()->ConfigureHandlerMode(handler);
} else {
// Non-keyed ICs don't track the name explicitly.
if (!is_keyed()) name = Handle<Name>::null();
@@ -407,7 +412,7 @@ void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map,
}
vector_set_ = true;
- OnFeedbackChanged(isolate(), *vector(), slot(), GetHostFunction(),
+ OnFeedbackChanged(isolate(), nexus(), GetHostFunction(),
IsLoadGlobalIC() ? "LoadGlobal" : "Monomorphic");
}
@@ -419,8 +424,7 @@ void IC::ConfigureVectorState(Handle<Name> name, MapHandles const& maps,
nexus()->ConfigurePolymorphic(name, maps, handlers);
vector_set_ = true;
- OnFeedbackChanged(isolate(), *vector(), slot(), GetHostFunction(),
- "Polymorphic");
+ OnFeedbackChanged(isolate(), nexus(), GetHostFunction(), "Polymorphic");
}
MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
@@ -451,6 +455,19 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
LookupIterator it(object, name);
LookupForRead(&it);
+ if (name->IsPrivate()) {
+ if (name->IsPrivateField() && !it.IsFound()) {
+ return TypeError(MessageTemplate::kInvalidPrivateFieldAccess, object,
+ name);
+ }
+
+ // IC handling of private symbols/fields lookup on JSProxy is not
+ // supported.
+ if (object->IsJSProxy()) {
+ use_ic = false;
+ }
+ }
+
if (it.IsFound() || !ShouldThrowReferenceError()) {
// Update inline cache and stub cache.
if (use_ic) UpdateCaches(&it);
@@ -492,9 +509,8 @@ MaybeHandle<Object> LoadGlobalIC::Load(Handle<Name> name) {
}
if (FLAG_use_ic) {
- LoadGlobalICNexus* nexus = casted_nexus<LoadGlobalICNexus>();
- if (nexus->ConfigureLexicalVarMode(lookup_result.context_index,
- lookup_result.slot_index)) {
+ if (nexus()->ConfigureLexicalVarMode(lookup_result.context_index,
+ lookup_result.slot_index)) {
TRACE_HANDLER_STATS(isolate(), LoadGlobalIC_LoadScriptContextField);
} else {
// Given combination of indices can't be encoded, so use slow stub.
@@ -638,14 +654,14 @@ void IC::PatchCache(Handle<Name> name, Handle<Object> handler) {
UpdateMonomorphicIC(handler, name);
break;
}
- // Fall through.
+ V8_FALLTHROUGH;
case POLYMORPHIC:
if (UpdatePolymorphicIC(name, handler)) break;
if (!is_keyed() || state() == RECOMPUTE_HANDLER) {
CopyICToMegamorphicCache(name);
}
ConfigureVectorState(MEGAMORPHIC, name);
- // Fall through.
+ V8_FALLTHROUGH;
case MEGAMORPHIC:
UpdateMegamorphicCache(*receiver_map(), *name, *handler);
// Indicate that we've handled this case.
@@ -685,8 +701,7 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
lookup->GetReceiver().is_identical_to(lookup->GetHolder<Object>())) {
DCHECK(lookup->GetReceiver()->IsJSGlobalObject());
// Now update the cell in the feedback vector.
- LoadGlobalICNexus* nexus = casted_nexus<LoadGlobalICNexus>();
- nexus->ConfigurePropertyCellMode(lookup->GetPropertyCell());
+ nexus()->ConfigurePropertyCellMode(lookup->GetPropertyCell());
TRACE_IC("LoadGlobalIC", lookup->name());
return;
}
@@ -1199,8 +1214,14 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
Object);
} else if (FLAG_use_ic && !object->IsAccessCheckNeeded() &&
!object->IsJSValue()) {
- if ((object->IsJSReceiver() || object->IsString()) &&
- key->ToArrayIndex(&index)) {
+ // For regular JSReceiver or String {object}s the {key} must be a positive
+ // array index, for JSTypedArray {object}s we can also support negative
+    // {key}s which we just map into the [2**31,2**32-1] range (via a bit_cast).
+ // This is valid since JSTypedArray::length is always a Smi.
+ if (((object->IsJSReceiver() || object->IsString()) &&
+ key->ToArrayIndex(&index)) ||
+ (object->IsJSTypedArray() &&
+ key->ToInt32(bit_cast<int32_t*>(&index)))) {
KeyedAccessLoadMode load_mode = GetLoadMode(object, index);
UpdateLoadElement(Handle<HeapObject>::cast(object), load_mode);
if (is_vector_set()) {
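A minimal standalone sketch (plain C++, not V8 code; AsElementIndex and the 1024 length are illustrative) of the bit_cast trick the comment above relies on: a negative 32-bit key reinterpreted as unsigned always lands in [2**31, 2**32-1], which can never be a valid element index for a length that fits in a Smi.

#include <cstdint>
#include <cstring>
#include <iostream>

// Reinterpret a negative 32-bit key as an unsigned element index, the same
// effect as writing through bit_cast<int32_t*>(&index) in the hunk above.
uint32_t AsElementIndex(int32_t key) {
  uint32_t index;
  std::memcpy(&index, &key, sizeof(index));
  return index;
}

int main() {
  uint32_t index = AsElementIndex(-1);        // 4294967295, i.e. 2**32 - 1
  uint32_t typed_array_length = 1024;         // any Smi-sized length is < 2**31
  std::cout << (index >= typed_array_length)  // always true for negative keys
            << "\n";
}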
@@ -1287,7 +1308,7 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
}
}
- receiver = it->GetStoreTarget();
+ receiver = it->GetStoreTarget<JSObject>();
if (it->ExtendingNonExtensible(receiver)) return false;
created_new_transition_ =
it->PrepareTransitionToDataProperty(receiver, value, NONE, store_mode);
@@ -1322,9 +1343,8 @@ MaybeHandle<Object> StoreGlobalIC::Store(Handle<Name> name,
}
if (FLAG_use_ic) {
- StoreGlobalICNexus* nexus = casted_nexus<StoreGlobalICNexus>();
- if (nexus->ConfigureLexicalVarMode(lookup_result.context_index,
- lookup_result.slot_index)) {
+ if (nexus()->ConfigureLexicalVarMode(lookup_result.context_index,
+ lookup_result.slot_index)) {
TRACE_HANDLER_STATS(isolate(), StoreGlobalIC_StoreScriptContextField);
} else {
// Given combination of indices can't be encoded, so use slow stub.
@@ -1383,7 +1403,23 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
LookupIterator it = LookupIterator::ForTransitionHandler(
isolate(), object, name, value, cached_handler, transition_map);
- if (FLAG_use_ic) UpdateCaches(&it, value, store_mode, cached_handler);
+
+ bool use_ic = FLAG_use_ic;
+
+ if (name->IsPrivate()) {
+ if (name->IsPrivateField() && !it.IsFound()) {
+ return TypeError(MessageTemplate::kInvalidPrivateFieldAccess, object,
+ name);
+ }
+
+ // IC handling of private fields/symbols stores on JSProxy is not
+ // supported.
+ if (object->IsJSProxy()) {
+ use_ic = false;
+ }
+ }
+
+ if (use_ic) UpdateCaches(&it, value, store_mode, cached_handler);
MAYBE_RETURN_NULL(
Object::SetProperty(&it, value, language_mode(), store_mode));
@@ -1411,8 +1447,7 @@ void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
lookup->GetReceiver().is_identical_to(lookup->GetHolder<Object>())) {
DCHECK(lookup->GetReceiver()->IsJSGlobalObject());
// Now update the cell in the feedback vector.
- StoreGlobalICNexus* nexus = casted_nexus<StoreGlobalICNexus>();
- nexus->ConfigurePropertyCellMode(lookup->GetPropertyCell());
+ nexus()->ConfigurePropertyCellMode(lookup->GetPropertyCell());
TRACE_IC("StoreGlobalIC", lookup->name());
return;
}
@@ -1439,7 +1474,7 @@ Handle<Object> StoreIC::ComputeHandler(LookupIterator* lookup) {
case LookupIterator::TRANSITION: {
Handle<JSObject> holder = lookup->GetHolder<JSObject>();
- Handle<JSObject> store_target = lookup->GetStoreTarget();
+ Handle<JSObject> store_target = lookup->GetStoreTarget<JSObject>();
if (store_target->IsJSGlobalObject()) {
TRACE_HANDLER_STATS(isolate(), StoreIC_StoreGlobalTransitionDH);
@@ -1692,7 +1727,7 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
}
if (receiver_map.is_identical_to(previous_receiver_map) &&
old_store_mode == STANDARD_STORE &&
- (store_mode == STORE_AND_GROW_NO_TRANSITION ||
+ (store_mode == STORE_AND_GROW_NO_TRANSITION_HANDLE_COW ||
store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
store_mode == STORE_NO_TRANSITION_HANDLE_COW)) {
// A "normal" IC that handles stores can switch to a version that can
@@ -1787,10 +1822,10 @@ Handle<Map> KeyedStoreIC::ComputeTransitionedMap(
}
case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
DCHECK(map->has_fixed_typed_array_elements());
- // Fall through
+ V8_FALLTHROUGH;
case STORE_NO_TRANSITION_HANDLE_COW:
case STANDARD_STORE:
- case STORE_AND_GROW_NO_TRANSITION:
+ case STORE_AND_GROW_NO_TRANSITION_HANDLE_COW:
return map;
}
UNREACHABLE();
@@ -1799,7 +1834,7 @@ Handle<Map> KeyedStoreIC::ComputeTransitionedMap(
Handle<Object> KeyedStoreIC::StoreElementHandler(
Handle<Map> receiver_map, KeyedAccessStoreMode store_mode) {
DCHECK(store_mode == STANDARD_STORE ||
- store_mode == STORE_AND_GROW_NO_TRANSITION ||
+ store_mode == STORE_AND_GROW_NO_TRANSITION_HANDLE_COW ||
store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
store_mode == STORE_NO_TRANSITION_HANDLE_COW);
DCHECK(!receiver_map->DictionaryElementsInPrototypeChainOnly());
@@ -1840,7 +1875,7 @@ void KeyedStoreIC::StoreElementPolymorphicHandlers(
MapHandles* receiver_maps, ObjectHandles* handlers,
KeyedAccessStoreMode store_mode) {
DCHECK(store_mode == STANDARD_STORE ||
- store_mode == STORE_AND_GROW_NO_TRANSITION ||
+ store_mode == STORE_AND_GROW_NO_TRANSITION_HANDLE_COW ||
store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
store_mode == STORE_NO_TRANSITION_HANDLE_COW);
@@ -1915,7 +1950,7 @@ static KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
return STORE_AND_GROW_TRANSITION_TO_OBJECT;
}
}
- return STORE_AND_GROW_NO_TRANSITION;
+ return STORE_AND_GROW_NO_TRANSITION_HANDLE_COW;
} else {
// Handle only in-bounds elements accesses.
if (receiver->HasSmiElements()) {
@@ -2005,7 +2040,13 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
old_receiver_map = handle(receiver->map(), isolate());
is_arguments = receiver->IsJSArgumentsObject();
bool is_proxy = receiver->IsJSProxy();
- key_is_valid_index = key->IsSmi() && Smi::ToInt(*key) >= 0;
+ // For JSTypedArray {object}s we can handle negative indices as OOB
+ // accesses, since integer indexed properties are never looked up
+ // on the prototype chain. For this we simply map the negative {key}s
+ // to the [2**31,2**32-1] range, which is safe since JSTypedArray::length
+ // is always an unsigned Smi.
+ key_is_valid_index =
+ key->IsSmi() && (Smi::ToInt(*key) >= 0 || object->IsJSTypedArray());
if (!is_arguments && !is_proxy) {
if (key_is_valid_index) {
uint32_t index = static_cast<uint32_t>(Smi::ToInt(*key));
@@ -2071,29 +2112,26 @@ RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
Handle<Name> key = args.at<Name>(1);
Handle<Smi> slot = args.at<Smi>(2);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(3);
- FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
// A monomorphic or polymorphic KeyedLoadIC with a string key can call the
// LoadIC miss handler if the handler misses. Since the vector Nexus is
// set up outside the IC, handle that here.
FeedbackSlotKind kind = vector->GetKind(vector_slot);
if (IsLoadICKind(kind)) {
- LoadICNexus nexus(vector, vector_slot);
- LoadIC ic(isolate, &nexus);
+ LoadIC ic(isolate, vector, vector_slot);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
} else if (IsLoadGlobalICKind(kind)) {
DCHECK_EQ(isolate->native_context()->global_proxy(), *receiver);
receiver = isolate->global_object();
- LoadGlobalICNexus nexus(vector, vector_slot);
- LoadGlobalIC ic(isolate, &nexus);
+ LoadGlobalIC ic(isolate, vector, vector_slot);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Load(key));
} else {
DCHECK(IsKeyedLoadICKind(kind));
- KeyedLoadICNexus nexus(vector, vector_slot);
- KeyedLoadIC ic(isolate, &nexus);
+ KeyedLoadIC ic(isolate, vector, vector_slot);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
}
@@ -2108,10 +2146,9 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Miss) {
Handle<String> name = args.at<String>(0);
Handle<Smi> slot = args.at<Smi>(1);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
- FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
- LoadGlobalICNexus nexus(vector, vector_slot);
- LoadGlobalIC ic(isolate, &nexus);
+ LoadGlobalIC ic(isolate, vector, vector_slot);
ic.UpdateState(global, name);
Handle<Object> result;
@@ -2150,7 +2187,7 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Slow) {
if (!is_found) {
Handle<Smi> slot = args.at<Smi>(1);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
- FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
FeedbackSlotKind kind = vector->GetKind(vector_slot);
// It is actually a LoadGlobalICs here but the predicate handles this case
// properly.
@@ -2171,9 +2208,8 @@ RUNTIME_FUNCTION(Runtime_KeyedLoadIC_Miss) {
Handle<Object> key = args.at(1);
Handle<Smi> slot = args.at<Smi>(2);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(3);
- FeedbackSlot vector_slot = vector->ToSlot(slot->value());
- KeyedLoadICNexus nexus(vector, vector_slot);
- KeyedLoadIC ic(isolate, &nexus);
+ FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
+ KeyedLoadIC ic(isolate, vector, vector_slot);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
}
@@ -2188,24 +2224,21 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
Handle<Object> receiver = args.at(3);
Handle<Name> key = args.at<Name>(4);
- FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
FeedbackSlotKind kind = vector->GetKind(vector_slot);
if (IsStoreICKind(kind) || IsStoreOwnICKind(kind)) {
- StoreICNexus nexus(vector, vector_slot);
- StoreIC ic(isolate, &nexus);
+ StoreIC ic(isolate, vector, vector_slot);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
} else if (IsStoreGlobalICKind(kind)) {
DCHECK_EQ(isolate->native_context()->global_proxy(), *receiver);
receiver = isolate->global_object();
- StoreGlobalICNexus nexus(vector, vector_slot);
- StoreGlobalIC ic(isolate, &nexus);
+ StoreGlobalIC ic(isolate, vector, vector_slot);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(key, value));
} else {
DCHECK(IsKeyedStoreICKind(kind));
- KeyedStoreICNexus nexus(vector, vector_slot);
- KeyedStoreIC ic(isolate, &nexus);
+ KeyedStoreIC ic(isolate, vector, vector_slot);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
}
@@ -2219,9 +2252,8 @@ RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Miss) {
Handle<Smi> slot = args.at<Smi>(1);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
Handle<Name> key = args.at<Name>(3);
- FeedbackSlot vector_slot = vector->ToSlot(slot->value());
- StoreGlobalICNexus nexus(vector, vector_slot);
- StoreGlobalIC ic(isolate, &nexus);
+ FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
+ StoreGlobalIC ic(isolate, vector, vector_slot);
Handle<JSGlobalObject> global = isolate->global_object();
ic.UpdateState(global, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(key, value));
@@ -2238,7 +2270,7 @@ RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Slow) {
#ifdef DEBUG
{
- FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
FeedbackSlotKind slot_kind = vector->GetKind(vector_slot);
DCHECK(IsStoreGlobalICKind(slot_kind));
Handle<Object> receiver = args.at(3);
@@ -2272,7 +2304,7 @@ RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Slow) {
return *value;
}
- FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
LanguageMode language_mode = vector->GetLanguageMode(vector_slot);
RETURN_RESULT_OR_FAILURE(
isolate,
@@ -2289,9 +2321,8 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
Handle<Object> receiver = args.at(3);
Handle<Object> key = args.at(4);
- FeedbackSlot vector_slot = vector->ToSlot(slot->value());
- KeyedStoreICNexus nexus(vector, vector_slot);
- KeyedStoreIC ic(isolate, &nexus);
+ FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
+ KeyedStoreIC ic(isolate, vector, vector_slot);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
}
@@ -2306,7 +2337,7 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
Handle<Object> object = args.at(3);
Handle<Object> key = args.at(4);
- FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
LanguageMode language_mode = vector->GetLanguageMode(vector_slot);
RETURN_RESULT_OR_FAILURE(
isolate,
@@ -2324,7 +2355,7 @@ RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
Handle<Map> map = args.at<Map>(3);
Handle<Smi> slot = args.at<Smi>(4);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(5);
- FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
LanguageMode language_mode = vector->GetLanguageMode(vector_slot);
if (object->IsJSObject()) {
JSObject::TransitionElementsKind(Handle<JSObject>::cast(object),
@@ -2336,11 +2367,6 @@ RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
}
-RUNTIME_FUNCTION(Runtime_Unreachable) {
- UNREACHABLE();
-}
-
-
RUNTIME_FUNCTION(Runtime_StoreCallbackProperty) {
Handle<JSObject> receiver = args.at<JSObject>(0);
Handle<JSObject> holder = args.at<JSObject>(1);
@@ -2413,7 +2439,7 @@ RUNTIME_FUNCTION(Runtime_LoadPropertyWithInterceptor) {
Handle<Smi> slot = args.at<Smi>(3);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(4);
- FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
FeedbackSlotKind slot_kind = vector->GetKind(vector_slot);
// It could actually be any kind of load IC slot here but the predicate
// handles all the cases properly.
@@ -2436,7 +2462,7 @@ RUNTIME_FUNCTION(Runtime_StorePropertyWithInterceptor) {
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
Handle<JSObject> receiver = args.at<JSObject>(3);
Handle<Name> name = args.at<Name>(4);
- FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
LanguageMode language_mode = vector->GetLanguageMode(vector_slot);
// TODO(ishell): Cache interceptor_holder in the store handler like we do
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index a63202395b..8a47d8d19c 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_IC_H_
-#define V8_IC_H_
+#ifndef V8_IC_IC_H_
+#define V8_IC_IC_H_
#include <vector>
@@ -36,7 +36,8 @@ class IC {
// Construct the IC structure with the given number of extra
// JavaScript frames on the stack.
- IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus = nullptr);
+ IC(FrameDepth depth, Isolate* isolate, Handle<FeedbackVector> vector,
+ FeedbackSlot slot);
virtual ~IC() {}
State state() const { return state_; }
@@ -67,6 +68,9 @@ class IC {
FeedbackSlot slot, JSFunction* host_function,
const char* reason);
+ static void OnFeedbackChanged(Isolate* isolate, FeedbackNexus* nexus,
+ JSFunction* host_function, const char* reason);
+
protected:
Address fp() const { return fp_; }
Address pc() const { return *pc_address_; }
@@ -151,17 +155,12 @@ class IC {
return !target_maps_.empty() ? *target_maps_[0] : nullptr;
}
- Handle<FeedbackVector> vector() const { return nexus()->vector_handle(); }
- FeedbackSlot slot() const { return nexus()->slot(); }
State saved_state() const {
return state() == RECOMPUTE_HANDLER ? old_state_ : state();
}
- template <class NexusClass>
- NexusClass* casted_nexus() {
- return static_cast<NexusClass*>(nexus_);
- }
- FeedbackNexus* nexus() const { return nexus_; }
+ const FeedbackNexus* nexus() const { return &nexus_; }
+ FeedbackNexus* nexus() { return &nexus_; }
private:
inline Address constant_pool() const;
@@ -200,7 +199,7 @@ class IC {
const char* slow_stub_reason_;
- FeedbackNexus* nexus_;
+ FeedbackNexus nexus_;
DISALLOW_IMPLICIT_CONSTRUCTORS(IC);
};
@@ -208,18 +207,15 @@ class IC {
class CallIC : public IC {
public:
- CallIC(Isolate* isolate, CallICNexus* nexus)
- : IC(EXTRA_CALL_FRAME, isolate, nexus) {
- DCHECK_NOT_NULL(nexus);
- }
+ CallIC(Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot slot)
+ : IC(EXTRA_CALL_FRAME, isolate, vector, slot) {}
};
class LoadIC : public IC {
public:
- LoadIC(Isolate* isolate, FeedbackNexus* nexus)
- : IC(NO_EXTRA_FRAME, isolate, nexus) {
- DCHECK_NOT_NULL(nexus);
+ LoadIC(Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot slot)
+ : IC(NO_EXTRA_FRAME, isolate, vector, slot) {
DCHECK(IsAnyLoad());
}
@@ -252,8 +248,9 @@ class LoadIC : public IC {
class LoadGlobalIC : public LoadIC {
public:
- LoadGlobalIC(Isolate* isolate, FeedbackNexus* nexus)
- : LoadIC(isolate, nexus) {}
+ LoadGlobalIC(Isolate* isolate, Handle<FeedbackVector> vector,
+ FeedbackSlot slot)
+ : LoadIC(isolate, vector, slot) {}
MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Name> name);
@@ -265,10 +262,9 @@ class LoadGlobalIC : public LoadIC {
class KeyedLoadIC : public LoadIC {
public:
- KeyedLoadIC(Isolate* isolate, KeyedLoadICNexus* nexus)
- : LoadIC(isolate, nexus) {
- DCHECK_NOT_NULL(nexus);
- }
+ KeyedLoadIC(Isolate* isolate, Handle<FeedbackVector> vector,
+ FeedbackSlot slot)
+ : LoadIC(isolate, vector, slot) {}
MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
Handle<Object> key);
@@ -297,14 +293,12 @@ class KeyedLoadIC : public LoadIC {
class StoreIC : public IC {
public:
- StoreIC(Isolate* isolate, FeedbackNexus* nexus)
- : IC(NO_EXTRA_FRAME, isolate, nexus) {
+ StoreIC(Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot slot)
+ : IC(NO_EXTRA_FRAME, isolate, vector, slot) {
DCHECK(IsAnyStore());
}
- LanguageMode language_mode() const {
- return nexus()->vector()->GetLanguageMode(nexus()->slot());
- }
+ LanguageMode language_mode() const { return nexus()->GetLanguageMode(); }
MUST_USE_RESULT MaybeHandle<Object> Store(
Handle<Object> object, Handle<Name> name, Handle<Object> value,
@@ -337,8 +331,9 @@ class StoreIC : public IC {
class StoreGlobalIC : public StoreIC {
public:
- StoreGlobalIC(Isolate* isolate, FeedbackNexus* nexus)
- : StoreIC(isolate, nexus) {}
+ StoreGlobalIC(Isolate* isolate, Handle<FeedbackVector> vector,
+ FeedbackSlot slot)
+ : StoreIC(isolate, vector, slot) {}
MUST_USE_RESULT MaybeHandle<Object> Store(Handle<Name> name,
Handle<Object> value);
@@ -358,11 +353,12 @@ enum KeyedStoreIncrementLength { kDontIncrementLength, kIncrementLength };
class KeyedStoreIC : public StoreIC {
public:
KeyedAccessStoreMode GetKeyedAccessStoreMode() {
- return casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
+ return nexus()->GetKeyedAccessStoreMode();
}
- KeyedStoreIC(Isolate* isolate, KeyedStoreICNexus* nexus)
- : StoreIC(isolate, nexus) {}
+ KeyedStoreIC(Isolate* isolate, Handle<FeedbackVector> vector,
+ FeedbackSlot slot)
+ : StoreIC(isolate, vector, slot) {}
MUST_USE_RESULT MaybeHandle<Object> Store(Handle<Object> object,
Handle<Object> name,
@@ -389,4 +385,4 @@ class KeyedStoreIC : public StoreIC {
} // namespace internal
} // namespace v8
-#endif // V8_IC_H_
+#endif // V8_IC_IC_H_
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index b9a11c2ec7..4997267ddd 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -428,9 +428,10 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
void KeyedStoreGenericAssembler::EmitGenericElementStore(
Node* receiver, Node* receiver_map, Node* instance_type, Node* intptr_index,
Node* value, Node* context, Label* slow) {
- Label if_fast(this), if_in_bounds(this), if_increment_length_by_one(this),
- if_bump_length_with_gap(this), if_grow(this), if_nonfast(this),
- if_typed_array(this), if_dictionary(this);
+ Label if_fast(this), if_in_bounds(this), if_out_of_bounds(this),
+ if_increment_length_by_one(this), if_bump_length_with_gap(this),
+ if_grow(this), if_nonfast(this), if_typed_array(this),
+ if_dictionary(this);
Node* elements = LoadElements(receiver);
Node* elements_kind = LoadMapElementsKind(receiver_map);
Branch(IsFastElementsKind(elements_kind), &if_fast, &if_nonfast);
@@ -440,7 +441,8 @@ void KeyedStoreGenericAssembler::EmitGenericElementStore(
GotoIf(InstanceTypeEqual(instance_type, JS_ARRAY_TYPE), &if_array);
{
Node* capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
- Branch(UintPtrLessThan(intptr_index, capacity), &if_in_bounds, &if_grow);
+ Branch(UintPtrLessThan(intptr_index, capacity), &if_in_bounds,
+ &if_out_of_bounds);
}
BIND(&if_array);
{
@@ -459,6 +461,16 @@ void KeyedStoreGenericAssembler::EmitGenericElementStore(
kDontChangeLength);
}
+ BIND(&if_out_of_bounds);
+ {
+ // Integer indexed out-of-bounds accesses to typed arrays are simply
+ // ignored, since we never look up integer indexed properties on the
+ // prototypes of typed arrays. For all other types, we may need to
+ // grow the backing store.
+ GotoIfNot(InstanceTypeEqual(instance_type, JS_TYPED_ARRAY_TYPE), &if_grow);
+ Return(value);
+ }
+
BIND(&if_increment_length_by_one);
{
StoreElementWithCapacity(receiver, receiver_map, elements, elements_kind,
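A rough sketch (plain C++, not CSA; the names are illustrative) of the control flow added above: in-bounds stores proceed normally, out-of-bounds stores to typed arrays are silently dropped, and every other fast-elements receiver still falls through to the growth path.

#include <cstdint>

enum class Path { kInBounds, kIgnoreOutOfBounds, kGrow };

// Mirrors the new Branch(...&if_out_of_bounds) plus BIND(&if_out_of_bounds):
// typed arrays never consult the prototype chain for integer-indexed
// properties, so an out-of-bounds store is simply ignored (Return(value)).
Path ClassifyElementStore(uint32_t index, uint32_t capacity,
                          bool receiver_is_typed_array) {
  if (index < capacity) return Path::kInBounds;
  return receiver_is_typed_array ? Path::kIgnoreOutOfBounds : Path::kGrow;
}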
@@ -911,9 +923,8 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&strict);
{
- Node* message = SmiConstant(MessageTemplate::kNoSetterInCallback);
- TailCallRuntime(Runtime::kThrowTypeError, p->context, message, p->name,
- var_accessor_holder.value());
+ ThrowTypeError(p->context, MessageTemplate::kNoSetterInCallback,
+ p->name, var_accessor_holder.value());
}
}
}
@@ -926,10 +937,9 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&strict);
{
- Node* message = SmiConstant(MessageTemplate::kStrictReadOnlyProperty);
Node* type = Typeof(p->receiver);
- TailCallRuntime(Runtime::kThrowTypeError, p->context, message, p->name,
- type, p->receiver);
+ ThrowTypeError(p->context, MessageTemplate::kStrictReadOnlyProperty,
+ p->name, type, p->receiver);
}
}
diff --git a/deps/v8/src/ic/keyed-store-generic.h b/deps/v8/src/ic/keyed-store-generic.h
index 4d82840be3..1a0de3b2b4 100644
--- a/deps/v8/src/ic/keyed-store-generic.h
+++ b/deps/v8/src/ic/keyed-store-generic.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SRC_IC_KEYED_STORE_GENERIC_H_
-#define V8_SRC_IC_KEYED_STORE_GENERIC_H_
+#ifndef V8_IC_KEYED_STORE_GENERIC_H_
+#define V8_IC_KEYED_STORE_GENERIC_H_
#include "src/globals.h"
@@ -27,4 +27,4 @@ class StoreICUninitializedGenerator {
} // namespace internal
} // namespace v8
-#endif // V8_SRC_IC_KEYED_STORE_GENERIC_H_
+#endif // V8_IC_KEYED_STORE_GENERIC_H_
diff --git a/deps/v8/src/ic/stub-cache.h b/deps/v8/src/ic/stub-cache.h
index cd081edfb2..870266eefd 100644
--- a/deps/v8/src/ic/stub-cache.h
+++ b/deps/v8/src/ic/stub-cache.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_STUB_CACHE_H_
-#define V8_STUB_CACHE_H_
+#ifndef V8_IC_STUB_CACHE_H_
+#define V8_IC_STUB_CACHE_H_
#include "src/macro-assembler.h"
#include "src/objects/name.h"
@@ -140,4 +140,4 @@ class StubCache {
} // namespace internal
} // namespace v8
-#endif // V8_STUB_CACHE_H_
+#endif // V8_IC_STUB_CACHE_H_
diff --git a/deps/v8/src/identity-map.cc b/deps/v8/src/identity-map.cc
index b652d6a6db..6a3bd4ca61 100644
--- a/deps/v8/src/identity-map.cc
+++ b/deps/v8/src/identity-map.cc
@@ -11,7 +11,7 @@ namespace v8 {
namespace internal {
static const int kInitialIdentityMapSize = 4;
-static const int kResizeFactor = 4;
+static const int kResizeFactor = 2;
IdentityMapBase::~IdentityMapBase() {
// Clear must be called by the subclass to avoid calling the virtual
@@ -87,7 +87,8 @@ void* IdentityMapBase::DeleteIndex(int index) {
size_--;
DCHECK_GE(size_, 0);
- if (size_ * kResizeFactor < capacity_ / kResizeFactor) {
+ if (capacity_ > kInitialIdentityMapSize &&
+ size_ * kResizeFactor < capacity_ / kResizeFactor) {
Resize(capacity_ / kResizeFactor);
return ret_value; // No need to fix collisions as resize reinserts keys.
}
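With kResizeFactor lowered to 2, the shrink condition above only fires once the map is less than a quarter full, and never below the initial capacity. A small free-standing sketch of the same rule (constants copied from the diff):

constexpr int kInitialIdentityMapSize = 4;
constexpr int kResizeFactor = 2;

// size * 2 < capacity / 2  <=>  size < capacity / 4, so the table only
// shrinks under 25% occupancy and never past its initial capacity.
bool ShouldShrinkOnDelete(int size, int capacity) {
  return capacity > kInitialIdentityMapSize &&
         size * kResizeFactor < capacity / kResizeFactor;
}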
@@ -194,6 +195,14 @@ void* IdentityMapBase::DeleteEntry(Object* key) {
return DeleteIndex(index);
}
+Object* IdentityMapBase::KeyAtIndex(int index) const {
+ DCHECK_LE(0, index);
+ DCHECK_LT(index, capacity_);
+ DCHECK_NE(keys_[index], heap_->not_mapped_symbol());
+ CHECK(is_iterable()); // Must be iterable to access by index;
+ return keys_[index];
+}
+
IdentityMapBase::RawEntry IdentityMapBase::EntryAtIndex(int index) const {
DCHECK_LE(0, index);
DCHECK_LT(index, capacity_);
diff --git a/deps/v8/src/identity-map.h b/deps/v8/src/identity-map.h
index cd198e1cb5..4e69d3198a 100644
--- a/deps/v8/src/identity-map.h
+++ b/deps/v8/src/identity-map.h
@@ -46,6 +46,8 @@ class IdentityMapBase {
void* DeleteEntry(Object* key);
void Clear();
+ Object* KeyAtIndex(int index) const;
+
V8_EXPORT_PRIVATE RawEntry EntryAtIndex(int index) const;
V8_EXPORT_PRIVATE int NextIndex(int index) const;
@@ -126,8 +128,13 @@ class IdentityMap : public IdentityMapBase {
return *this;
}
- V* operator*() { return reinterpret_cast<V*>(map_->EntryAtIndex(index_)); }
- V* operator->() { return reinterpret_cast<V*>(map_->EntryAtIndex(index_)); }
+ Object* key() const { return map_->KeyAtIndex(index_); }
+ V* entry() const {
+ return reinterpret_cast<V*>(map_->EntryAtIndex(index_));
+ }
+
+ V* operator*() { return entry(); }
+ V* operator->() { return entry(); }
bool operator!=(const Iterator& other) { return index_ != other.index_; }
private:
diff --git a/deps/v8/src/inspector/DEPS b/deps/v8/src/inspector/DEPS
index 85b506a956..f396d64b99 100644
--- a/deps/v8/src/inspector/DEPS
+++ b/deps/v8/src/inspector/DEPS
@@ -7,6 +7,7 @@ include_rules = [
"+src/base/platform/platform.h",
"+src/conversions.h",
"+src/flags.h",
+ "+src/utils.h",
"+src/unicode-cache.h",
"+src/inspector",
"+src/tracing",
diff --git a/deps/v8/src/inspector/injected-script.cc b/deps/v8/src/inspector/injected-script.cc
index a5e981cda5..d13e5f8695 100644
--- a/deps/v8/src/inspector/injected-script.cc
+++ b/deps/v8/src/inspector/injected-script.cc
@@ -49,6 +49,7 @@ namespace v8_inspector {
namespace {
static const char privateKeyName[] = "v8-inspector#injectedScript";
+static const char kGlobalHandleLabel[] = "DevTools console";
} // namespace
using protocol::Array;
@@ -511,6 +512,7 @@ v8::Local<v8::Value> InjectedScript::lastEvaluationResult() const {
void InjectedScript::setLastEvaluationResult(v8::Local<v8::Value> result) {
m_lastEvaluationResult.Reset(m_context->isolate(), result);
+ m_lastEvaluationResult.AnnotateStrongRetainer(kGlobalHandleLabel);
}
Response InjectedScript::resolveCallArgument(
@@ -601,8 +603,10 @@ Response InjectedScript::wrapEvaluateResult(
Response response = wrapObject(resultValue, objectGroup, returnByValue,
generatePreview, result);
if (!response.isSuccess()) return response;
- if (objectGroup == "console")
+ if (objectGroup == "console") {
m_lastEvaluationResult.Reset(m_context->isolate(), resultValue);
+ m_lastEvaluationResult.AnnotateStrongRetainer(kGlobalHandleLabel);
+ }
} else {
v8::Local<v8::Value> exception = tryCatch.Exception();
Response response =
@@ -624,6 +628,7 @@ v8::Local<v8::Object> InjectedScript::commandLineAPI() {
m_context->isolate(),
m_context->inspector()->console()->createCommandLineAPI(
m_context->context(), m_sessionId));
+ m_commandLineAPI.AnnotateStrongRetainer(kGlobalHandleLabel);
}
return m_commandLineAPI.Get(m_context->isolate());
}
@@ -769,6 +774,7 @@ int InjectedScript::bindObject(v8::Local<v8::Value> value,
if (m_lastBoundObjectId <= 0) m_lastBoundObjectId = 1;
int id = m_lastBoundObjectId++;
m_idToWrappedObject[id].Reset(m_context->isolate(), value);
+ m_idToWrappedObject[id].AnnotateStrongRetainer(kGlobalHandleLabel);
if (!groupName.isEmpty() && id > 0) {
m_idToObjectGroupName[id] = groupName;
diff --git a/deps/v8/src/inspector/injected-script.h b/deps/v8/src/inspector/injected-script.h
index 16938fb317..90a1ed3171 100644
--- a/deps/v8/src/inspector/injected-script.h
+++ b/deps/v8/src/inspector/injected-script.h
@@ -28,8 +28,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef V8_INSPECTOR_INJECTEDSCRIPT_H_
-#define V8_INSPECTOR_INJECTEDSCRIPT_H_
+#ifndef V8_INSPECTOR_INJECTED_SCRIPT_H_
+#define V8_INSPECTOR_INJECTED_SCRIPT_H_
#include <unordered_map>
#include <unordered_set>
@@ -220,4 +220,4 @@ class InjectedScript final {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_INJECTEDSCRIPT_H_
+#endif // V8_INSPECTOR_INJECTED_SCRIPT_H_
diff --git a/deps/v8/src/inspector/inspected-context.h b/deps/v8/src/inspector/inspected-context.h
index ac33071f62..ef0a0ca52a 100644
--- a/deps/v8/src/inspector/inspected-context.h
+++ b/deps/v8/src/inspector/inspected-context.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_INSPECTEDCONTEXT_H_
-#define V8_INSPECTOR_INSPECTEDCONTEXT_H_
+#ifndef V8_INSPECTOR_INSPECTED_CONTEXT_H_
+#define V8_INSPECTOR_INSPECTED_CONTEXT_H_
#include <unordered_map>
#include <unordered_set>
@@ -65,4 +65,4 @@ class InspectedContext {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_INSPECTEDCONTEXT_H_
+#endif // V8_INSPECTOR_INSPECTED_CONTEXT_H_
diff --git a/deps/v8/src/inspector/inspector.gyp b/deps/v8/src/inspector/inspector.gyp
deleted file mode 100644
index 3d59cc089d..0000000000
--- a/deps/v8/src/inspector/inspector.gyp
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'variables': {
- 'protocol_path': '../../third_party/inspector_protocol',
- },
- 'includes': [
- 'inspector.gypi',
- '<(PRODUCT_DIR)/../../../third_party/inspector_protocol/inspector_protocol.gypi',
- ],
- 'targets': [
- { 'target_name': 'inspector_injected_script',
- 'type': 'none',
- 'toolsets': ['target'],
- 'actions': [
- {
- 'action_name': 'convert_js_to_cpp_char_array',
- 'inputs': [
- 'build/xxd.py',
- '<(inspector_injected_script_source)',
- ],
- 'outputs': [
- '<(inspector_generated_injected_script)',
- ],
- 'action': [
- 'python',
- 'build/xxd.py',
- 'InjectedScriptSource_js',
- 'injected-script-source.js',
- '<@(_outputs)'
- ],
- },
- ],
- # Since this target generates header files, it needs to be a hard dependency.
- 'hard_dependency': 1,
- },
- { 'target_name': 'protocol_compatibility',
- 'type': 'none',
- 'toolsets': ['target'],
- 'actions': [
- {
- 'action_name': 'protocol_compatibility',
- 'inputs': [
- 'js_protocol.json',
- ],
- 'outputs': [
- '<@(SHARED_INTERMEDIATE_DIR)/src/js_protocol.stamp',
- ],
- 'action': [
- 'python',
- '<(protocol_path)/CheckProtocolCompatibility.py',
- '--stamp', '<@(_outputs)',
- 'js_protocol.json',
- ],
- 'message': 'Generating inspector protocol sources from protocol json definition',
- },
- ]
- },
- { 'target_name': 'protocol_generated_sources',
- 'type': 'none',
- 'dependencies': [ 'protocol_compatibility' ],
- 'toolsets': ['target'],
- 'actions': [
- {
- 'action_name': 'protocol_generated_sources',
- 'inputs': [
- 'js_protocol.json',
- 'inspector_protocol_config.json',
- '<@(inspector_protocol_files)',
- ],
- 'outputs': [
- '<@(inspector_generated_sources)',
- ],
- 'action': [
- 'python',
- '<(protocol_path)/CodeGenerator.py',
- '--jinja_dir', '../../third_party',
- '--output_base', '<(SHARED_INTERMEDIATE_DIR)/src/inspector',
- '--config', 'inspector_protocol_config.json',
- ],
- 'message': 'Generating inspector protocol sources from protocol json',
- },
- ]
- },
- ],
-}
diff --git a/deps/v8/src/inspector/inspector.gypi b/deps/v8/src/inspector/inspector.gypi
deleted file mode 100644
index d6443283f5..0000000000
--- a/deps/v8/src/inspector/inspector.gypi
+++ /dev/null
@@ -1,90 +0,0 @@
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'variables': {
- 'inspector_generated_sources': [
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Forward.h',
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Protocol.cpp',
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Protocol.h',
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Console.cpp',
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Console.h',
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Debugger.cpp',
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Debugger.h',
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/HeapProfiler.cpp',
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/HeapProfiler.h',
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Profiler.cpp',
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Profiler.h',
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Runtime.cpp',
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Runtime.h',
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Schema.cpp',
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Schema.h',
- '<(SHARED_INTERMEDIATE_DIR)/include/inspector/Debugger.h',
- '<(SHARED_INTERMEDIATE_DIR)/include/inspector/Runtime.h',
- '<(SHARED_INTERMEDIATE_DIR)/include/inspector/Schema.h',
- ],
-
- 'inspector_injected_script_source': 'injected-script-source.js',
- 'inspector_generated_injected_script': '<(SHARED_INTERMEDIATE_DIR)/src/inspector/injected-script-source.h',
-
- 'inspector_all_sources': [
- '<@(inspector_generated_sources)',
- '<(inspector_generated_injected_script)',
- '../include/v8-inspector.h',
- '../include/v8-inspector-protocol.h',
- 'inspector/injected-script.cc',
- 'inspector/injected-script.h',
- 'inspector/inspected-context.cc',
- 'inspector/inspected-context.h',
- 'inspector/remote-object-id.cc',
- 'inspector/remote-object-id.h',
- 'inspector/search-util.cc',
- 'inspector/search-util.h',
- 'inspector/string-16.cc',
- 'inspector/string-16.h',
- 'inspector/string-util.cc',
- 'inspector/string-util.h',
- 'inspector/test-interface.cc',
- 'inspector/test-interface.h',
- 'inspector/v8-console.cc',
- 'inspector/v8-console.h',
- 'inspector/v8-console-agent-impl.cc',
- 'inspector/v8-console-agent-impl.h',
- 'inspector/v8-console-message.cc',
- 'inspector/v8-console-message.h',
- 'inspector/v8-debugger.cc',
- 'inspector/v8-debugger.h',
- 'inspector/v8-debugger-agent-impl.cc',
- 'inspector/v8-debugger-agent-impl.h',
- 'inspector/v8-debugger-script.cc',
- 'inspector/v8-debugger-script.h',
- 'inspector/v8-function-call.cc',
- 'inspector/v8-function-call.h',
- 'inspector/v8-heap-profiler-agent-impl.cc',
- 'inspector/v8-heap-profiler-agent-impl.h',
- 'inspector/v8-injected-script-host.cc',
- 'inspector/v8-injected-script-host.h',
- 'inspector/v8-inspector-impl.cc',
- 'inspector/v8-inspector-impl.h',
- 'inspector/v8-inspector-session-impl.cc',
- 'inspector/v8-inspector-session-impl.h',
- 'inspector/v8-internal-value-type.cc',
- 'inspector/v8-internal-value-type.h',
- 'inspector/v8-profiler-agent-impl.cc',
- 'inspector/v8-profiler-agent-impl.h',
- 'inspector/v8-regex.cc',
- 'inspector/v8-regex.h',
- 'inspector/v8-runtime-agent-impl.cc',
- 'inspector/v8-runtime-agent-impl.h',
- 'inspector/v8-schema-agent-impl.cc',
- 'inspector/v8-schema-agent-impl.h',
- 'inspector/v8-stack-trace-impl.cc',
- 'inspector/v8-stack-trace-impl.h',
- 'inspector/v8-value-utils.cc',
- 'inspector/v8-value-utils.h',
- 'inspector/wasm-translation.cc',
- 'inspector/wasm-translation.h',
- ]
- }
-}
diff --git a/deps/v8/src/inspector/remote-object-id.h b/deps/v8/src/inspector/remote-object-id.h
index 3e6928a87e..923274236d 100644
--- a/deps/v8/src/inspector/remote-object-id.h
+++ b/deps/v8/src/inspector/remote-object-id.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_REMOTEOBJECTID_H_
-#define V8_INSPECTOR_REMOTEOBJECTID_H_
+#ifndef V8_INSPECTOR_REMOTE_OBJECT_ID_H_
+#define V8_INSPECTOR_REMOTE_OBJECT_ID_H_
#include "src/inspector/protocol/Forward.h"
@@ -54,4 +54,4 @@ class RemoteCallFrameId final : public RemoteObjectIdBase {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_REMOTEOBJECTID_H_
+#endif // V8_INSPECTOR_REMOTE_OBJECT_ID_H_
diff --git a/deps/v8/src/inspector/search-util.h b/deps/v8/src/inspector/search-util.h
index 8f5753b620..3c8a9fe31c 100644
--- a/deps/v8/src/inspector/search-util.h
+++ b/deps/v8/src/inspector/search-util.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_SEARCHUTIL_H_
-#define V8_INSPECTOR_SEARCHUTIL_H_
+#ifndef V8_INSPECTOR_SEARCH_UTIL_H_
+#define V8_INSPECTOR_SEARCH_UTIL_H_
#include "src/inspector/protocol/Debugger.h"
#include "src/inspector/string-util.h"
@@ -21,4 +21,4 @@ searchInTextByLinesImpl(V8InspectorSession*, const String16& text,
} // namespace v8_inspector
-#endif // V8_INSPECTOR_SEARCHUTIL_H_
+#endif // V8_INSPECTOR_SEARCH_UTIL_H_
diff --git a/deps/v8/src/inspector/string-16.cc b/deps/v8/src/inspector/string-16.cc
index dc753fee40..43343c887b 100644
--- a/deps/v8/src/inspector/string-16.cc
+++ b/deps/v8/src/inspector/string-16.cc
@@ -136,16 +136,19 @@ ConversionResult convertUTF16ToUTF8(const UChar** sourceStart,
result = targetExhausted;
break;
}
- switch (bytesToWrite) { // note: everything falls through.
+ switch (bytesToWrite) {
case 4:
*--target = static_cast<char>((ch | byteMark) & byteMask);
ch >>= 6;
+ V8_FALLTHROUGH;
case 3:
*--target = static_cast<char>((ch | byteMark) & byteMask);
ch >>= 6;
+ V8_FALLTHROUGH;
case 2:
*--target = static_cast<char>((ch | byteMark) & byteMask);
ch >>= 6;
+ V8_FALLTHROUGH;
case 1:
*--target = static_cast<char>(ch | firstByteMark[bytesToWrite]);
}
@@ -210,8 +213,10 @@ static bool isLegalUTF8(const unsigned char* source, int length) {
// Everything else falls through when "true"...
case 4:
if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false;
+ V8_FALLTHROUGH;
case 3:
if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false;
+ V8_FALLTHROUGH;
case 2:
if ((a = (*--srcptr)) > 0xBF) return false;
@@ -232,6 +237,7 @@ static bool isLegalUTF8(const unsigned char* source, int length) {
default:
if (a < 0x80) return false;
}
+ V8_FALLTHROUGH;
case 1:
if (*source >= 0x80 && *source < 0xC2) return false;
@@ -258,18 +264,23 @@ static inline UChar32 readUTF8Sequence(const char*& sequence, size_t length) {
case 6:
character += static_cast<unsigned char>(*sequence++);
character <<= 6;
+ V8_FALLTHROUGH;
case 5:
character += static_cast<unsigned char>(*sequence++);
character <<= 6;
+ V8_FALLTHROUGH;
case 4:
character += static_cast<unsigned char>(*sequence++);
character <<= 6;
+ V8_FALLTHROUGH;
case 3:
character += static_cast<unsigned char>(*sequence++);
character <<= 6;
+ V8_FALLTHROUGH;
case 2:
character += static_cast<unsigned char>(*sequence++);
character <<= 6;
+ V8_FALLTHROUGH;
case 1:
character += static_cast<unsigned char>(*sequence++);
}
diff --git a/deps/v8/src/inspector/string-16.h b/deps/v8/src/inspector/string-16.h
index 1dc9350e96..7d6867dfc3 100644
--- a/deps/v8/src/inspector/string-16.h
+++ b/deps/v8/src/inspector/string-16.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_STRING16_H_
-#define V8_INSPECTOR_STRING16_H_
+#ifndef V8_INSPECTOR_STRING_16_H_
+#define V8_INSPECTOR_STRING_16_H_
#include <stdint.h>
#include <cctype>
@@ -149,4 +149,4 @@ struct hash<v8_inspector::String16> {
#endif // !defined(__APPLE__) || defined(_LIBCPP_VERSION)
-#endif // V8_INSPECTOR_STRING16_H_
+#endif // V8_INSPECTOR_STRING_16_H_
diff --git a/deps/v8/src/inspector/string-util.h b/deps/v8/src/inspector/string-util.h
index 8aaf3ce850..0c025ef93a 100644
--- a/deps/v8/src/inspector/string-util.h
+++ b/deps/v8/src/inspector/string-util.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_STRINGUTIL_H_
-#define V8_INSPECTOR_STRINGUTIL_H_
+#ifndef V8_INSPECTOR_STRING_UTIL_H_
+#define V8_INSPECTOR_STRING_UTIL_H_
#include <memory>
@@ -92,4 +92,4 @@ String16 stackTraceIdToString(uintptr_t id);
} // namespace v8_inspector
-#endif // V8_INSPECTOR_STRINGUTIL_H_
+#endif // V8_INSPECTOR_STRING_UTIL_H_
diff --git a/deps/v8/src/inspector/test-interface.h b/deps/v8/src/inspector/test-interface.h
index 70fbca186f..946d1f6020 100644
--- a/deps/v8/src/inspector/test-interface.h
+++ b/deps/v8/src/inspector/test-interface.h
@@ -16,4 +16,4 @@ V8_EXPORT void DumpAsyncTaskStacksStateForTest(V8Inspector* inspector);
} // v8_inspector
-#endif // V8_INSPECTOR_TEST_INTERFACE_H_
+#endif // V8_INSPECTOR_TEST_INTERFACE_H_
diff --git a/deps/v8/src/inspector/v8-console-agent-impl.h b/deps/v8/src/inspector/v8-console-agent-impl.h
index db17e54718..f436aa2f5c 100644
--- a/deps/v8/src/inspector/v8-console-agent-impl.h
+++ b/deps/v8/src/inspector/v8-console-agent-impl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_V8CONSOLEAGENTIMPL_H_
-#define V8_INSPECTOR_V8CONSOLEAGENTIMPL_H_
+#ifndef V8_INSPECTOR_V8_CONSOLE_AGENT_IMPL_H_
+#define V8_INSPECTOR_V8_CONSOLE_AGENT_IMPL_H_
#include "src/base/macros.h"
#include "src/inspector/protocol/Console.h"
@@ -45,4 +45,4 @@ class V8ConsoleAgentImpl : public protocol::Console::Backend {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8CONSOLEAGENTIMPL_H_
+#endif // V8_INSPECTOR_V8_CONSOLE_AGENT_IMPL_H_
diff --git a/deps/v8/src/inspector/v8-console-message.cc b/deps/v8/src/inspector/v8-console-message.cc
index e96e89c0eb..ea50a8dfee 100644
--- a/deps/v8/src/inspector/v8-console-message.cc
+++ b/deps/v8/src/inspector/v8-console-message.cc
@@ -58,6 +58,7 @@ String16 consoleAPITypeValue(ConsoleAPIType type) {
return protocol::Runtime::ConsoleAPICalled::TypeEnum::Log;
}
+const char kGlobalConsoleMessageHandleLabel[] = "DevTools console";
const unsigned maxConsoleMessageCount = 1000;
const int maxConsoleMessageV8Size = 10 * 1024 * 1024;
const unsigned maxArrayItemsLimit = 10000;
@@ -379,8 +380,10 @@ std::unique_ptr<V8ConsoleMessage> V8ConsoleMessage::createForConsoleAPI(
message->m_type = type;
message->m_contextId = contextId;
for (size_t i = 0; i < arguments.size(); ++i) {
- message->m_arguments.push_back(std::unique_ptr<v8::Global<v8::Value>>(
- new v8::Global<v8::Value>(isolate, arguments.at(i))));
+ std::unique_ptr<v8::Global<v8::Value>> argument(
+ new v8::Global<v8::Value>(isolate, arguments.at(i)));
+ argument->AnnotateStrongRetainer(kGlobalConsoleMessageHandleLabel);
+ message->m_arguments.push_back(std::move(argument));
message->m_v8Size +=
v8::debug::EstimatedValueSize(isolate, arguments.at(i));
}
diff --git a/deps/v8/src/inspector/v8-console-message.h b/deps/v8/src/inspector/v8-console-message.h
index f82f8e5a13..103cb9002b 100644
--- a/deps/v8/src/inspector/v8-console-message.h
+++ b/deps/v8/src/inspector/v8-console-message.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_V8CONSOLEMESSAGE_H_
-#define V8_INSPECTOR_V8CONSOLEMESSAGE_H_
+#ifndef V8_INSPECTOR_V8_CONSOLE_MESSAGE_H_
+#define V8_INSPECTOR_V8_CONSOLE_MESSAGE_H_
#include <deque>
#include <map>
@@ -138,4 +138,4 @@ class V8ConsoleMessageStorage {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8CONSOLEMESSAGE_H_
+#endif // V8_INSPECTOR_V8_CONSOLE_MESSAGE_H_
diff --git a/deps/v8/src/inspector/v8-console.h b/deps/v8/src/inspector/v8-console.h
index ba4dfe328b..311625efde 100644
--- a/deps/v8/src/inspector/v8-console.h
+++ b/deps/v8/src/inspector/v8-console.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_V8CONSOLE_H_
-#define V8_INSPECTOR_V8CONSOLE_H_
+#ifndef V8_INSPECTOR_V8_CONSOLE_H_
+#define V8_INSPECTOR_V8_CONSOLE_H_
#include "src/base/macros.h"
@@ -172,4 +172,4 @@ class V8Console : public v8::debug::ConsoleDelegate {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8CONSOLE_H_
+#endif // V8_INSPECTOR_V8_CONSOLE_H_
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index 7bfde09b71..78325ef978 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -57,6 +57,8 @@ static const char kDebuggerNotPaused[] =
static const size_t kBreakpointHintMaxLength = 128;
static const intptr_t kBreakpointHintMaxSearchOffset = 80 * 10;
+static const int kMaxScriptFailedToParseScripts = 1000;
+
namespace {
void TranslateLocation(protocol::Debugger::Location* location,
@@ -1416,7 +1418,13 @@ void V8DebuggerAgentImpl::didParseSource(
static_cast<int>(scriptRef->source().length()), std::move(stackTrace));
}
- if (!success) return;
+ if (!success) {
+ if (scriptURL.isEmpty()) {
+ m_failedToParseAnonymousScriptIds.push_back(scriptId);
+ cleanupOldFailedToParseAnonymousScriptsIfNeeded();
+ }
+ return;
+ }
std::vector<protocol::DictionaryValue*> potentialBreakpoints;
if (!scriptURL.isEmpty()) {
@@ -1618,4 +1626,18 @@ void V8DebuggerAgentImpl::reset() {
m_breakpointIdToDebuggerBreakpointIds.clear();
}
+void V8DebuggerAgentImpl::cleanupOldFailedToParseAnonymousScriptsIfNeeded() {
+ if (m_failedToParseAnonymousScriptIds.size() <=
+ kMaxScriptFailedToParseScripts)
+ return;
+ static_assert(kMaxScriptFailedToParseScripts > 100,
+                "kMaxScriptFailedToParseScripts should be greater than 100");
+ while (m_failedToParseAnonymousScriptIds.size() >
+ kMaxScriptFailedToParseScripts - 100 + 1) {
+ String16 scriptId = m_failedToParseAnonymousScriptIds.front();
+ m_failedToParseAnonymousScriptIds.pop_front();
+ m_scripts.erase(scriptId);
+ }
+}
+
} // namespace v8_inspector
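The cleanup added above is a bounded FIFO: once more than kMaxScriptFailedToParseScripts (1000) anonymous script ids are tracked, the oldest entries are evicted until roughly 100 slots are free again, so the loop runs rarely. A rough standalone sketch (std::string stands in for String16, and the m_scripts erase is reduced to a comment):

#include <cstddef>
#include <deque>
#include <string>

constexpr size_t kMaxScriptFailedToParseScripts = 1000;

void CleanupOldFailedToParseAnonymousScriptsIfNeeded(
    std::deque<std::string>& failed_ids) {
  if (failed_ids.size() <= kMaxScriptFailedToParseScripts) return;
  // Evict oldest ids until ~100 slots are free below the threshold.
  while (failed_ids.size() > kMaxScriptFailedToParseScripts - 100 + 1) {
    failed_ids.pop_front();  // the real code also erases the id from m_scripts
  }
}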
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.h b/deps/v8/src/inspector/v8-debugger-agent-impl.h
index 168c5a7724..6feaeff914 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.h
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.h
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_V8DEBUGGERAGENTIMPL_H_
-#define V8_INSPECTOR_V8DEBUGGERAGENTIMPL_H_
+#ifndef V8_INSPECTOR_V8_DEBUGGER_AGENT_IMPL_H_
+#define V8_INSPECTOR_V8_DEBUGGER_AGENT_IMPL_H_
+#include <deque>
#include <vector>
#include "src/base/macros.h"
@@ -192,6 +193,9 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
BreakpointIdToDebuggerBreakpointIdsMap m_breakpointIdToDebuggerBreakpointIds;
DebuggerBreakpointIdToBreakpointIdMap m_debuggerBreakpointIdToBreakpointId;
+ std::deque<String16> m_failedToParseAnonymousScriptIds;
+ void cleanupOldFailedToParseAnonymousScriptsIfNeeded();
+
using BreakReason =
std::pair<String16, std::unique_ptr<protocol::DictionaryValue>>;
std::vector<BreakReason> m_breakReason;
@@ -215,4 +219,4 @@ String16 scopeType(v8::debug::ScopeIterator::ScopeType type);
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8DEBUGGERAGENTIMPL_H_
+#endif // V8_INSPECTOR_V8_DEBUGGER_AGENT_IMPL_H_
diff --git a/deps/v8/src/inspector/v8-debugger-script.cc b/deps/v8/src/inspector/v8-debugger-script.cc
index 6ec7f32c89..c596ee5053 100644
--- a/deps/v8/src/inspector/v8-debugger-script.cc
+++ b/deps/v8/src/inspector/v8-debugger-script.cc
@@ -7,12 +7,14 @@
#include "src/inspector/inspected-context.h"
#include "src/inspector/string-util.h"
#include "src/inspector/wasm-translation.h"
+#include "src/utils.h"
namespace v8_inspector {
namespace {
const char hexDigits[17] = "0123456789ABCDEF";
+const char kGlobalDebuggerScriptHandleLabel[] = "DevTools debugger";
void appendUnsignedAsHex(uint64_t number, String16Builder* destination) {
for (size_t i = 0; i < 8; ++i) {
@@ -43,11 +45,12 @@ String16 calculateHash(const String16& str) {
const uint32_t* data = nullptr;
size_t sizeInBytes = sizeof(UChar) * str.length();
data = reinterpret_cast<const uint32_t*>(str.characters16());
- for (size_t i = 0; i < sizeInBytes / 4; i += 4) {
+ for (size_t i = 0; i < sizeInBytes / 4; ++i) {
+ uint32_t d = v8::internal::ReadUnalignedUInt32(data + i);
#if V8_TARGET_LITTLE_ENDIAN
- uint32_t v = data[i];
+ uint32_t v = d;
#else
- uint32_t v = (data[i] << 16) | (data[i] >> 16);
+ uint32_t v = (d << 16) | (d >> 16);
#endif
uint64_t xi = v * randomOdd[current] & 0x7FFFFFFF;
hashes[current] = (hashes[current] + zi[current] * xi) % prime[current];
@@ -56,15 +59,16 @@ String16 calculateHash(const String16& str) {
}
if (sizeInBytes % 4) {
uint32_t v = 0;
+ const uint8_t* data_8b = reinterpret_cast<const uint8_t*>(data);
for (size_t i = sizeInBytes - sizeInBytes % 4; i < sizeInBytes; ++i) {
v <<= 8;
#if V8_TARGET_LITTLE_ENDIAN
- v |= reinterpret_cast<const uint8_t*>(data)[i];
+ v |= data_8b[i];
#else
if (i % 2) {
- v |= reinterpret_cast<const uint8_t*>(data)[i - 1];
+ v |= data_8b[i - 1];
} else {
- v |= reinterpret_cast<const uint8_t*>(data)[i + 1];
+ v |= data_8b[i + 1];
}
#endif
}
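The hash fix above does two things: the loop now visits every 32-bit word (the old stride of i += 4 hashed only a quarter of them), and each word is read through ReadUnalignedUInt32, since the UTF-16 buffer is only 2-byte aligned. A memcpy-based stand-in for the unaligned read (illustrative only, not the v8::internal helper):

#include <cstdint>
#include <cstring>

// Loads a 32-bit word from a pointer that may only be 2-byte aligned; a
// plain *reinterpret_cast<const uint32_t*> would be undefined behavior on
// strict-alignment targets.
uint32_t ReadUnaligned32(const void* p) {
  uint32_t v;
  std::memcpy(&v, p, sizeof(v));
  return v;
}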
@@ -147,6 +151,7 @@ class ActualScript : public V8DebuggerScript {
m_isModule = script->IsModule();
m_script.Reset(m_isolate, script);
+ m_script.AnnotateStrongRetainer(kGlobalDebuggerScriptHandleLabel);
}
bool isLiveEdit() const override { return m_isLiveEdit; }
@@ -264,6 +269,7 @@ class WasmVirtualScript : public V8DebuggerScript {
: V8DebuggerScript(isolate, std::move(id), std::move(url)),
m_script(isolate, script),
m_wasmTranslation(wasmTranslation) {
+ m_script.AnnotateStrongRetainer(kGlobalDebuggerScriptHandleLabel);
int num_lines = 0;
int last_newline = -1;
size_t next_newline = source.find('\n', last_newline + 1);
diff --git a/deps/v8/src/inspector/v8-debugger-script.h b/deps/v8/src/inspector/v8-debugger-script.h
index f1e28184b5..6badd87c97 100644
--- a/deps/v8/src/inspector/v8-debugger-script.h
+++ b/deps/v8/src/inspector/v8-debugger-script.h
@@ -27,8 +27,8 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef V8_INSPECTOR_V8DEBUGGERSCRIPT_H_
-#define V8_INSPECTOR_V8DEBUGGERSCRIPT_H_
+#ifndef V8_INSPECTOR_V8_DEBUGGER_SCRIPT_H_
+#define V8_INSPECTOR_V8_DEBUGGER_SCRIPT_H_
#include "src/base/macros.h"
#include "src/inspector/string-16.h"
@@ -111,4 +111,4 @@ class V8DebuggerScript {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8DEBUGGERSCRIPT_H_
+#endif // V8_INSPECTOR_V8_DEBUGGER_SCRIPT_H_
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index c86f320252..9b0ca38018 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -494,7 +494,6 @@ void V8Debugger::ScriptCompiled(v8::Local<v8::debug::Script> script,
void V8Debugger::BreakProgramRequested(
v8::Local<v8::Context> pausedContext, v8::Local<v8::Object>,
- v8::Local<v8::Value>,
const std::vector<v8::debug::BreakpointId>& break_points_hit) {
handleProgramBreak(pausedContext, v8::Local<v8::Value>(), break_points_hit);
}
diff --git a/deps/v8/src/inspector/v8-debugger.h b/deps/v8/src/inspector/v8-debugger.h
index 4828fcad52..a710726581 100644
--- a/deps/v8/src/inspector/v8-debugger.h
+++ b/deps/v8/src/inspector/v8-debugger.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_V8DEBUGGER_H_
-#define V8_INSPECTOR_V8DEBUGGER_H_
+#ifndef V8_INSPECTOR_V8_DEBUGGER_H_
+#define V8_INSPECTOR_V8_DEBUGGER_H_
#include <list>
#include <unordered_map>
@@ -169,7 +169,6 @@ class V8Debugger : public v8::debug::DebugDelegate {
bool has_compile_error) override;
void BreakProgramRequested(
v8::Local<v8::Context> paused_context, v8::Local<v8::Object>,
- v8::Local<v8::Value>,
const std::vector<v8::debug::BreakpointId>& break_points_hit) override;
void ExceptionThrown(v8::Local<v8::Context> paused_context,
v8::Local<v8::Object>, v8::Local<v8::Value> exception,
@@ -240,4 +239,4 @@ class V8Debugger : public v8::debug::DebugDelegate {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8DEBUGGER_H_
+#endif // V8_INSPECTOR_V8_DEBUGGER_H_
diff --git a/deps/v8/src/inspector/v8-function-call.h b/deps/v8/src/inspector/v8-function-call.h
index 0337caa339..28a5886c91 100644
--- a/deps/v8/src/inspector/v8-function-call.h
+++ b/deps/v8/src/inspector/v8-function-call.h
@@ -28,8 +28,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef V8_INSPECTOR_V8FUNCTIONCALL_H_
-#define V8_INSPECTOR_V8FUNCTIONCALL_H_
+#ifndef V8_INSPECTOR_V8_FUNCTION_CALL_H_
+#define V8_INSPECTOR_V8_FUNCTION_CALL_H_
#include "src/inspector/string-16.h"
@@ -62,4 +62,4 @@ class V8FunctionCall {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8FUNCTIONCALL_H_
+#endif // V8_INSPECTOR_V8_FUNCTION_CALL_H_
diff --git a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h
index 7491a80f10..5c2107d573 100644
--- a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h
+++ b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_V8HEAPPROFILERAGENTIMPL_H_
-#define V8_INSPECTOR_V8HEAPPROFILERAGENTIMPL_H_
+#ifndef V8_INSPECTOR_V8_HEAP_PROFILER_AGENT_IMPL_H_
+#define V8_INSPECTOR_V8_HEAP_PROFILER_AGENT_IMPL_H_
#include "src/base/macros.h"
#include "src/inspector/protocol/Forward.h"
@@ -66,4 +66,4 @@ class V8HeapProfilerAgentImpl : public protocol::HeapProfiler::Backend {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8HEAPPROFILERAGENTIMPL_H_
+#endif // V8_INSPECTOR_V8_HEAP_PROFILER_AGENT_IMPL_H_
diff --git a/deps/v8/src/inspector/v8-injected-script-host.h b/deps/v8/src/inspector/v8-injected-script-host.h
index 18f9139d63..6a3ee3d386 100644
--- a/deps/v8/src/inspector/v8-injected-script-host.h
+++ b/deps/v8/src/inspector/v8-injected-script-host.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_V8INJECTEDSCRIPTHOST_H_
-#define V8_INSPECTOR_V8INJECTEDSCRIPTHOST_H_
+#ifndef V8_INSPECTOR_V8_INJECTED_SCRIPT_HOST_H_
+#define V8_INSPECTOR_V8_INJECTED_SCRIPT_HOST_H_
#include "include/v8.h"
@@ -50,4 +50,4 @@ class V8InjectedScriptHost {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8INJECTEDSCRIPTHOST_H_
+#endif // V8_INSPECTOR_V8_INJECTED_SCRIPT_HOST_H_
diff --git a/deps/v8/src/inspector/v8-inspector-impl.h b/deps/v8/src/inspector/v8-inspector-impl.h
index 92e7b21960..0627eae317 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-impl.h
@@ -28,8 +28,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef V8_INSPECTOR_V8INSPECTORIMPL_H_
-#define V8_INSPECTOR_V8INSPECTORIMPL_H_
+#ifndef V8_INSPECTOR_V8_INSPECTOR_IMPL_H_
+#define V8_INSPECTOR_V8_INSPECTOR_IMPL_H_
#include <functional>
#include <map>
@@ -154,4 +154,4 @@ class V8InspectorImpl : public V8Inspector {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8INSPECTORIMPL_H_
+#endif // V8_INSPECTOR_V8_INSPECTOR_IMPL_H_
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.h b/deps/v8/src/inspector/v8-inspector-session-impl.h
index 4fb924f749..8ca0915b66 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_V8INSPECTORSESSIONIMPL_H_
-#define V8_INSPECTOR_V8INSPECTORSESSIONIMPL_H_
+#ifndef V8_INSPECTOR_V8_INSPECTOR_SESSION_IMPL_H_
+#define V8_INSPECTOR_V8_INSPECTOR_SESSION_IMPL_H_
#include <vector>
@@ -126,4 +126,4 @@ class V8InspectorSessionImpl : public V8InspectorSession,
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8INSPECTORSESSIONIMPL_H_
+#endif // V8_INSPECTOR_V8_INSPECTOR_SESSION_IMPL_H_
diff --git a/deps/v8/src/inspector/v8-internal-value-type.h b/deps/v8/src/inspector/v8-internal-value-type.h
index e648a0d4a3..991919a82e 100644
--- a/deps/v8/src/inspector/v8-internal-value-type.h
+++ b/deps/v8/src/inspector/v8-internal-value-type.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_V8INTERNALVALUETYPE_H_
-#define V8_INSPECTOR_V8INTERNALVALUETYPE_H_
+#ifndef V8_INSPECTOR_V8_INTERNAL_VALUE_TYPE_H_
+#define V8_INSPECTOR_V8_INTERNAL_VALUE_TYPE_H_
#include "include/v8.h"
@@ -20,4 +20,4 @@ v8::Local<v8::Value> v8InternalValueTypeFrom(v8::Local<v8::Context>,
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8INTERNALVALUETYPE_H_
+#endif // V8_INSPECTOR_V8_INTERNAL_VALUE_TYPE_H_
diff --git a/deps/v8/src/inspector/v8-profiler-agent-impl.h b/deps/v8/src/inspector/v8-profiler-agent-impl.h
index e758a900fa..a68ea1144c 100644
--- a/deps/v8/src/inspector/v8-profiler-agent-impl.h
+++ b/deps/v8/src/inspector/v8-profiler-agent-impl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_V8PROFILERAGENTIMPL_H_
-#define V8_INSPECTOR_V8PROFILERAGENTIMPL_H_
+#ifndef V8_INSPECTOR_V8_PROFILER_AGENT_IMPL_H_
+#define V8_INSPECTOR_V8_PROFILER_AGENT_IMPL_H_
#include <vector>
@@ -85,4 +85,4 @@ class V8ProfilerAgentImpl : public protocol::Profiler::Backend {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8PROFILERAGENTIMPL_H_
+#endif // V8_INSPECTOR_V8_PROFILER_AGENT_IMPL_H_
diff --git a/deps/v8/src/inspector/v8-regex.h b/deps/v8/src/inspector/v8-regex.h
index b4b1f8ce13..0c4136fc8b 100644
--- a/deps/v8/src/inspector/v8-regex.h
+++ b/deps/v8/src/inspector/v8-regex.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_V8REGEX_H_
-#define V8_INSPECTOR_V8REGEX_H_
+#ifndef V8_INSPECTOR_V8_REGEX_H_
+#define V8_INSPECTOR_V8_REGEX_H_
#include "src/base/macros.h"
#include "src/inspector/string-16.h"
@@ -34,4 +34,4 @@ class V8Regex {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8REGEX_H_
+#endif // V8_INSPECTOR_V8_REGEX_H_
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.cc b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
index 22d48e23bf..6975f35e71 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -255,13 +255,12 @@ void V8RuntimeAgentImpl::evaluate(
if (evalIsDisabled) scope.context()->AllowCodeGenerationFromStrings(true);
v8::MaybeLocal<v8::Value> maybeResultValue;
- v8::Local<v8::Script> script;
- if (m_inspector->compileScript(scope.context(), expression, String16())
- .ToLocal(&script)) {
+ {
v8::MicrotasksScope microtasksScope(m_inspector->isolate(),
v8::MicrotasksScope::kRunMicrotasks);
- maybeResultValue = script->Run(scope.context());
- }
+ maybeResultValue = v8::debug::EvaluateGlobal(
+ m_inspector->isolate(), toV8String(m_inspector->isolate(), expression));
+ } // Run microtasks before returning result.
if (evalIsDisabled) scope.context()->AllowCodeGenerationFromStrings(false);
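The MicrotasksScope above ensures that queued microtasks are flushed once evaluation finishes, before the result is reported back over the protocol. A minimal standalone sketch of that RAII pattern follows (plain C++, not the real v8::MicrotasksScope or its checkpoint policy):

#include <cstdio>
#include <functional>
#include <queue>

// Global task queue standing in for the isolate's microtask queue.
std::queue<std::function<void()>> microtask_queue;

// RAII scope: everything queued while the scope is open runs when it closes.
class MicrotasksScope {
 public:
  ~MicrotasksScope() {
    while (!microtask_queue.empty()) {
      microtask_queue.front()();
      microtask_queue.pop();
    }
  }
};

int main() {
  int result = 0;
  {
    MicrotasksScope scope;
    microtask_queue.push([] { std::printf("microtask ran\n"); });
    result = 42;  // Stand-in for evaluating the expression.
  }  // Queued microtasks run here, before the result is used below.
  std::printf("result: %d\n", result);
  return 0;
}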
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.h b/deps/v8/src/inspector/v8-runtime-agent-impl.h
index cc63b697c9..790654da08 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.h
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.h
@@ -28,8 +28,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef V8_INSPECTOR_V8RUNTIMEAGENTIMPL_H_
-#define V8_INSPECTOR_V8RUNTIMEAGENTIMPL_H_
+#ifndef V8_INSPECTOR_V8_RUNTIME_AGENT_IMPL_H_
+#define V8_INSPECTOR_V8_RUNTIME_AGENT_IMPL_H_
#include "src/base/macros.h"
#include "src/inspector/protocol/Forward.h"
@@ -129,4 +129,4 @@ class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8RUNTIMEAGENTIMPL_H_
+#endif // V8_INSPECTOR_V8_RUNTIME_AGENT_IMPL_H_
diff --git a/deps/v8/src/inspector/v8-schema-agent-impl.h b/deps/v8/src/inspector/v8-schema-agent-impl.h
index e733aa0d5a..b96cce1401 100644
--- a/deps/v8/src/inspector/v8-schema-agent-impl.h
+++ b/deps/v8/src/inspector/v8-schema-agent-impl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_V8SCHEMAAGENTIMPL_H_
-#define V8_INSPECTOR_V8SCHEMAAGENTIMPL_H_
+#ifndef V8_INSPECTOR_V8_SCHEMA_AGENT_IMPL_H_
+#define V8_INSPECTOR_V8_SCHEMA_AGENT_IMPL_H_
#include "src/base/macros.h"
#include "src/inspector/protocol/Forward.h"
@@ -33,4 +33,4 @@ class V8SchemaAgentImpl : public protocol::Schema::Backend {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8SCHEMAAGENTIMPL_H_
+#endif // V8_INSPECTOR_V8_SCHEMA_AGENT_IMPL_H_
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.h b/deps/v8/src/inspector/v8-stack-trace-impl.h
index 08d98110ae..87d2b0f027 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.h
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_V8STACKTRACEIMPL_H_
-#define V8_INSPECTOR_V8STACKTRACEIMPL_H_
+#ifndef V8_INSPECTOR_V8_STACK_TRACE_IMPL_H_
+#define V8_INSPECTOR_V8_STACK_TRACE_IMPL_H_
#include <memory>
#include <vector>
@@ -145,4 +145,4 @@ class AsyncStackTrace {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8STACKTRACEIMPL_H_
+#endif // V8_INSPECTOR_V8_STACK_TRACE_IMPL_H_
diff --git a/deps/v8/src/inspector/v8-value-utils.h b/deps/v8/src/inspector/v8-value-utils.h
index 4d7b77077f..029fee224b 100644
--- a/deps/v8/src/inspector/v8-value-utils.h
+++ b/deps/v8/src/inspector/v8-value-utils.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_V8VALUEUTILS_H_
-#define V8_INSPECTOR_V8VALUEUTILS_H_
+#ifndef V8_INSPECTOR_V8_VALUE_UTILS_H_
+#define V8_INSPECTOR_V8_VALUE_UTILS_H_
#include "src/inspector/protocol/Protocol.h"
@@ -23,4 +23,4 @@ protocol::Response toProtocolValue(v8::Local<v8::Context>, v8::Local<v8::Value>,
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8VALUEUTILS_H_
+#endif // V8_INSPECTOR_V8_VALUE_UTILS_H_
diff --git a/deps/v8/src/inspector/wasm-translation.h b/deps/v8/src/inspector/wasm-translation.h
index 2162edee67..9bd33c0bc8 100644
--- a/deps/v8/src/inspector/wasm-translation.h
+++ b/deps/v8/src/inspector/wasm-translation.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_WASMTRANSLATION_H_
-#define V8_INSPECTOR_WASMTRANSLATION_H_
+#ifndef V8_INSPECTOR_WASM_TRANSLATION_H_
+#define V8_INSPECTOR_WASM_TRANSLATION_H_
#include <unordered_map>
@@ -72,4 +72,4 @@ class WasmTranslation {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_WASMTRANSLATION_H_
+#endif // V8_INSPECTOR_WASM_TRANSLATION_H_
diff --git a/deps/v8/src/instruction-stream.cc b/deps/v8/src/instruction-stream.cc
new file mode 100644
index 0000000000..7d00ea5434
--- /dev/null
+++ b/deps/v8/src/instruction-stream.cc
@@ -0,0 +1,66 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/instruction-stream.h"
+
+#include "src/builtins/builtins.h"
+#include "src/heap/heap.h"
+#include "src/objects-inl.h"
+#include "src/objects/code-inl.h"
+
+namespace v8 {
+namespace internal {
+
+InstructionStream::InstructionStream(Code* code)
+ : builtin_index_(code->builtin_index()) {
+ DCHECK(Builtins::IsOffHeapBuiltin(code));
+ const size_t page_size = AllocatePageSize();
+ byte_length_ =
+ RoundUp(static_cast<size_t>(code->instruction_size()), page_size);
+
+ bytes_ = static_cast<uint8_t*>(AllocatePages(
+ GetRandomMmapAddr(), byte_length_, page_size, PageAllocator::kReadWrite));
+ CHECK_NOT_NULL(bytes_);
+
+ std::memcpy(bytes_, code->instruction_start(), code->instruction_size());
+ CHECK(SetPermissions(bytes_, byte_length_, PageAllocator::kReadExecute));
+}
+
+InstructionStream::~InstructionStream() {
+ CHECK(FreePages(bytes_, byte_length_));
+}
+
+// static
+Code* InstructionStream::TryLookupCode(Isolate* isolate, Address address) {
+ DCHECK(FLAG_stress_off_heap_code);
+ // TODO(jgruber,v8:6666): Replace with binary search through range checks
+ // once off-heap code is mapped into a contiguous memory space.
+ for (const InstructionStream* stream : isolate->off_heap_code_) {
+ if (stream->Contains(address)) {
+ return isolate->builtins()->builtin(stream->builtin_index());
+ }
+ }
+ return nullptr;
+}
+
+// static
+InstructionStream* InstructionStream::TryLookupInstructionStream(
+ Isolate* isolate, Code* code) {
+ DCHECK(FLAG_stress_off_heap_code);
+ // TODO(jgruber,v8:6666): Replace with binary search through range checks
+ // once off-heap code is mapped into a contiguous memory space.
+ const int builtin_index = code->builtin_index();
+ DCHECK(Builtins::IsBuiltinId(builtin_index));
+ for (InstructionStream* stream : isolate->off_heap_code_) {
+ if (stream->builtin_index() == builtin_index) return stream;
+ }
+ return nullptr;
+}
+
+bool InstructionStream::Contains(Address address) const {
+ return bytes_ <= address && address < bytes_ + byte_length_;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/instruction-stream.h b/deps/v8/src/instruction-stream.h
new file mode 100644
index 0000000000..750e94a955
--- /dev/null
+++ b/deps/v8/src/instruction-stream.h
@@ -0,0 +1,48 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSTRUCTION_STREAM_H_
+#define V8_INSTRUCTION_STREAM_H_
+
+#include "src/base/macros.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class Code;
+class Isolate;
+
+// Wraps an mmap'ed off-heap instruction stream. This class will likely become
+// unneeded once --stress-off-heap-code is removed.
+class InstructionStream final {
+ public:
+ explicit InstructionStream(Code* code);
+ ~InstructionStream();
+
+ // Returns the corresponding Code object if it exists, and nullptr otherwise.
+ static Code* TryLookupCode(Isolate* isolate, Address address);
+
+ // Returns the corresponding stream if it exists, and nullptr otherwise.
+ static InstructionStream* TryLookupInstructionStream(Isolate* isolate,
+ Code* code);
+
+ bool Contains(Address address) const;
+
+ int builtin_index() const { return builtin_index_; }
+ size_t byte_length() const { return byte_length_; }
+ uint8_t* bytes() const { return bytes_; }
+
+ private:
+ size_t byte_length_;
+ uint8_t* bytes_;
+ int builtin_index_;
+
+  DISALLOW_COPY_AND_ASSIGN(InstructionStream);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INSTRUCTION_STREAM_H_
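The new InstructionStream constructor follows an allocate/copy/protect sequence: reserve writable pages, copy the builtin's instructions into them, then switch the mapping to read+execute. A standalone POSIX sketch of that sequence (mmap/mprotect here rather than V8's PageAllocator, and a dummy payload instead of real builtin code):

#include <sys/mman.h>
#include <unistd.h>
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  const uint8_t code[] = {0xC3};  // Dummy payload (x86-64 'ret').
  const size_t page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  // Round the instruction size up to whole pages, as the constructor does.
  const size_t byte_length =
      ((sizeof(code) + page_size - 1) / page_size) * page_size;

  // Allocate writable pages...
  void* bytes = mmap(nullptr, byte_length, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (bytes == MAP_FAILED) return 1;

  // ...copy the instructions in...
  std::memcpy(bytes, code, sizeof(code));

  // ...then drop write access and allow execution (kReadExecute).
  if (mprotect(bytes, byte_length, PROT_READ | PROT_EXEC) != 0) return 1;

  std::printf("%zu bytes mapped off-heap\n", byte_length);
  return munmap(bytes, byte_length);
}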
diff --git a/deps/v8/src/interface-descriptors.cc b/deps/v8/src/interface-descriptors.cc
index 3b466aceb9..9771f0e00c 100644
--- a/deps/v8/src/interface-descriptors.cc
+++ b/deps/v8/src/interface-descriptors.cc
@@ -284,6 +284,21 @@ void StringAtDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, kParameterCount);
}
+void StringSubstringDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kString, kFrom, kTo
+ // TODO(turbofan): Allow builtins to return untagged values.
+ MachineType machine_types[] = {MachineType::AnyTagged(),
+ MachineType::IntPtr(), MachineType::IntPtr()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void StringSubstringDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, kParameterCount);
+}
+
void TypeConversionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ArgumentRegister()};
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/interface-descriptors.h
index 12b25a510a..dd704144de 100644
--- a/deps/v8/src/interface-descriptors.h
+++ b/deps/v8/src/interface-descriptors.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_CALL_INTERFACE_DESCRIPTOR_H_
-#define V8_CALL_INTERFACE_DESCRIPTOR_H_
+#ifndef V8_INTERFACE_DESCRIPTORS_H_
+#define V8_INTERFACE_DESCRIPTORS_H_
#include <memory>
@@ -30,7 +30,6 @@ class PlatformInterfaceDescriptor;
V(StoreTransition) \
V(StoreGlobal) \
V(StoreGlobalWithVector) \
- V(FastNewClosure) \
V(FastNewFunctionContext) \
V(FastNewObject) \
V(FastNewArguments) \
@@ -63,6 +62,7 @@ class PlatformInterfaceDescriptor;
V(BinaryOp) \
V(StringAdd) \
V(StringAt) \
+ V(StringSubstring) \
V(ForInPrepare) \
V(GetProperty) \
V(ArgumentAdaptor) \
@@ -80,6 +80,7 @@ class PlatformInterfaceDescriptor;
V(FrameDropperTrampoline) \
V(WasmRuntimeCall) \
V(RunMicrotasks) \
+ V(PromiseReactionHandler) \
BUILTIN_LIST_TFS(V)
class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
@@ -514,12 +515,6 @@ class LoadGlobalWithVectorDescriptor : public LoadGlobalDescriptor {
}
};
-class FastNewClosureDescriptor : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS(kSharedFunctionInfo, kVector, kSlot)
- DECLARE_DESCRIPTOR(FastNewClosureDescriptor, CallInterfaceDescriptor)
-};
-
class FastNewFunctionContextDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kFunction, kSlots)
@@ -770,6 +765,13 @@ class StringAtDescriptor final : public CallInterfaceDescriptor {
CallInterfaceDescriptor)
};
+class StringSubstringDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kString, kFrom, kTo)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StringSubstringDescriptor,
+ CallInterfaceDescriptor)
+};
+
class ArgumentAdaptorDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kFunction, kNewTarget, kActualArgumentsCount,
@@ -884,6 +886,13 @@ class RunMicrotasksDescriptor final : public CallInterfaceDescriptor {
0)
};
+class PromiseReactionHandlerDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kArgument, kGenerator)
+ DECLARE_DEFAULT_DESCRIPTOR(PromiseReactionHandlerDescriptor,
+ CallInterfaceDescriptor, 2)
+};
+
#define DEFINE_TFS_BUILTIN_DESCRIPTOR(Name, ...) \
class Name##Descriptor : public CallInterfaceDescriptor { \
public: \
@@ -917,4 +926,4 @@ INTERFACE_DESCRIPTOR_LIST(DEF_KEY)
#include "src/arm/interface-descriptors-arm.h"
#endif
-#endif // V8_CALL_INTERFACE_DESCRIPTOR_H_
+#endif // V8_INTERFACE_DESCRIPTORS_H_
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.cc b/deps/v8/src/interpreter/bytecode-array-accessor.cc
index 784bb14eb6..2a5923b2a4 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.cc
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.cc
@@ -4,6 +4,7 @@
#include "src/interpreter/bytecode-array-accessor.h"
+#include "src/feedback-vector.h"
#include "src/interpreter/bytecode-decoder.h"
#include "src/interpreter/interpreter-intrinsics.h"
#include "src/objects-inl.h"
@@ -125,6 +126,11 @@ uint32_t BytecodeArrayAccessor::GetIndexOperand(int operand_index) const {
return GetUnsignedOperand(operand_index, operand_type);
}
+FeedbackSlot BytecodeArrayAccessor::GetSlotOperand(int operand_index) const {
+ int index = GetIndexOperand(operand_index);
+ return FeedbackVector::ToSlot(index);
+}
+
Register BytecodeArrayAccessor::GetRegisterOperand(int operand_index) const {
OperandType operand_type =
Bytecodes::GetOperandType(current_bytecode(), operand_index);
@@ -206,12 +212,18 @@ int BytecodeArrayAccessor::GetJumpTargetOffset() const {
JumpTableTargetOffsets BytecodeArrayAccessor::GetJumpTableTargetOffsets()
const {
- DCHECK_EQ(current_bytecode(), Bytecode::kSwitchOnSmiNoFeedback);
-
- uint32_t table_start = GetIndexOperand(0);
- uint32_t table_size = GetUnsignedImmediateOperand(1);
- int32_t case_value_base = GetImmediateOperand(2);
-
+ uint32_t table_start, table_size;
+ int32_t case_value_base;
+ if (current_bytecode() == Bytecode::kSwitchOnGeneratorState) {
+ table_start = GetIndexOperand(1);
+ table_size = GetUnsignedImmediateOperand(2);
+ case_value_base = 0;
+ } else {
+ DCHECK_EQ(current_bytecode(), Bytecode::kSwitchOnSmiNoFeedback);
+ table_start = GetIndexOperand(0);
+ table_size = GetUnsignedImmediateOperand(1);
+ case_value_base = GetImmediateOperand(2);
+ }
return JumpTableTargetOffsets(this, table_start, table_size, case_value_base);
}
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.h b/deps/v8/src/interpreter/bytecode-array-accessor.h
index d585e6dc33..f31d2d0e7f 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.h
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.h
@@ -83,6 +83,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
uint32_t GetUnsignedImmediateOperand(int operand_index) const;
int32_t GetImmediateOperand(int operand_index) const;
uint32_t GetIndexOperand(int operand_index) const;
+ FeedbackSlot GetSlotOperand(int operand_index) const;
uint32_t GetRegisterCountOperand(int operand_index) const;
Register GetRegisterOperand(int operand_index) const;
int GetRegisterOperandRange(int operand_index) const;
@@ -130,4 +131,4 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
} // namespace internal
} // namespace v8
-#endif // V8_INTERPRETER_BYTECODE_GRAPH_ACCESSOR_H_
+#endif // V8_INTERPRETER_BYTECODE_ARRAY_ACCESSOR_H_
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index dcbe8029f9..2d156e4095 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -92,7 +92,7 @@ Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(Isolate* isolate) {
register_count = register_optimizer_->maxiumum_register_index() + 1;
}
- Handle<FixedArray> handler_table =
+ Handle<ByteArray> handler_table =
handler_table_builder()->ToHandlerTable(isolate);
return bytecode_array_writer_.ToBytecodeArray(
isolate, register_count, parameter_count(), handler_table);
@@ -973,8 +973,8 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateEmptyObjectLiteral() {
}
BytecodeArrayBuilder& BytecodeArrayBuilder::GetTemplateObject(
- size_t template_object_description_entry) {
- OutputGetTemplateObject(template_object_description_entry);
+ size_t template_object_description_entry, int feedback_slot) {
+ OutputGetTemplateObject(template_object_description_entry, feedback_slot);
return *this;
}
@@ -1271,16 +1271,19 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::SuspendGenerator(
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::RestoreGeneratorState(
- Register generator) {
- OutputRestoreGeneratorState(generator);
+BytecodeArrayBuilder& BytecodeArrayBuilder::SwitchOnGeneratorState(
+ Register generator, BytecodeJumpTable* jump_table) {
+ DCHECK_EQ(jump_table->case_value_base(), 0);
+ BytecodeNode node(CreateSwitchOnGeneratorStateNode(
+ generator, jump_table->constant_pool_index(), jump_table->size()));
+ WriteSwitch(&node, jump_table);
+ LeaveBasicBlock();
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::ResumeGenerator(
- Register generator, Register generator_state, RegisterList registers) {
- OutputResumeGenerator(generator, generator_state, registers,
- registers.register_count());
+ Register generator, RegisterList registers) {
+ OutputResumeGenerator(generator, registers, registers.register_count());
return *this;
}
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index 021222abe5..05086bf714 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -240,7 +240,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
// Gets or creates the template for a TemplateObjectDescription which will
// be inserted at constant pool index |template_object_description_entry|.
BytecodeArrayBuilder& GetTemplateObject(
- size_t template_object_description_entry);
+ size_t template_object_description_entry, int feedback_slot);
// Push the context in accumulator as the new context, and store in register
// |context|.
@@ -354,6 +354,9 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
// the key to be deleted and the register contains a reference to the object.
BytecodeArrayBuilder& Delete(Register object, LanguageMode language_mode);
+ // JavaScript defines two kinds of 'nil'.
+ enum NilValue { kNullValue, kUndefinedValue };
+
// Tests.
BytecodeArrayBuilder& CompareOperation(Token::Value op, Register reg,
int feedback_slot);
@@ -430,9 +433,9 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
BytecodeArrayBuilder& SuspendGenerator(Register generator,
RegisterList registers,
int suspend_id);
- BytecodeArrayBuilder& RestoreGeneratorState(Register generator);
+ BytecodeArrayBuilder& SwitchOnGeneratorState(Register generator,
+ BytecodeJumpTable* jump_table);
BytecodeArrayBuilder& ResumeGenerator(Register generator,
- Register generator_state,
RegisterList registers);
// Exception handling.
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.cc b/deps/v8/src/interpreter/bytecode-array-writer.cc
index 9aea3d83fa..81f49baeea 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.cc
+++ b/deps/v8/src/interpreter/bytecode-array-writer.cc
@@ -38,7 +38,7 @@ BytecodeArrayWriter::BytecodeArrayWriter(
Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
Isolate* isolate, int register_count, int parameter_count,
- Handle<FixedArray> handler_table) {
+ Handle<ByteArray> handler_table) {
DCHECK_EQ(0, unbound_jumps_);
int bytecode_size = static_cast<int>(bytecodes()->size());
@@ -158,6 +158,7 @@ void BytecodeArrayWriter::UpdateExitSeenInBlock(Bytecode bytecode) {
case Bytecode::kAbort:
case Bytecode::kJump:
case Bytecode::kJumpConstant:
+ case Bytecode::kSuspendGenerator:
exit_seen_in_block_ = true;
break;
default:
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.h b/deps/v8/src/interpreter/bytecode-array-writer.h
index c53df10129..9700d2c1cf 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.h
+++ b/deps/v8/src/interpreter/bytecode-array-writer.h
@@ -43,7 +43,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayWriter final {
void BindJumpTableEntry(BytecodeJumpTable* jump_table, int case_value);
Handle<BytecodeArray> ToBytecodeArray(Isolate* isolate, int register_count,
int parameter_count,
- Handle<FixedArray> handler_table);
+ Handle<ByteArray> handler_table);
private:
// Maximum sized packed bytecode is comprised of a prefix bytecode,
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index ee94e7a2e2..997c5a8da8 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -853,8 +853,22 @@ class BytecodeGenerator::IteratorRecord final {
Register next_;
};
+#ifdef DEBUG
+
+static bool IsInEagerLiterals(
+ FunctionLiteral* literal,
+ const ZoneVector<FunctionLiteral*>& eager_literals) {
+ for (FunctionLiteral* eager_literal : eager_literals) {
+ if (literal == eager_literal) return true;
+ }
+ return false;
+}
+
+#endif // DEBUG
+
BytecodeGenerator::BytecodeGenerator(
- CompilationInfo* info, const AstStringConstants* ast_string_constants)
+ CompilationInfo* info, const AstStringConstants* ast_string_constants,
+ ZoneVector<FunctionLiteral*>* eager_inner_literals)
: zone_(info->zone()),
builder_(zone(), info->num_parameters_including_this(),
info->scope()->num_stack_slots(), info->feedback_vector_spec(),
@@ -863,6 +877,7 @@ BytecodeGenerator::BytecodeGenerator(
ast_string_constants_(ast_string_constants),
closure_scope_(info->scope()),
current_scope_(info->scope()),
+ eager_inner_literals_(eager_inner_literals),
feedback_slot_cache_(new (zone()) FeedbackSlotCache(zone())),
globals_builder_(new (zone()) GlobalDeclarationsBuilder(zone())),
block_coverage_builder_(nullptr),
@@ -878,7 +893,7 @@ BytecodeGenerator::BytecodeGenerator(
execution_result_(nullptr),
incoming_new_target_or_generator_(),
generator_jump_table_(nullptr),
- generator_state_(),
+ suspend_count_(0),
loop_depth_(0),
catch_prediction_(HandlerTable::UNCAUGHT) {
DCHECK_EQ(closure_scope(), closure_scope()->GetClosureScope());
@@ -1091,8 +1106,6 @@ void BytecodeGenerator::GenerateBytecodeBody() {
void BytecodeGenerator::AllocateTopLevelRegisters() {
if (info()->literal()->CanSuspend()) {
- // Allocate a register for generator_state_.
- generator_state_ = register_allocator()->NewRegister();
// Either directly use generator_object_var or allocate a new register for
// the incoming generator object.
Variable* generator_object_var = closure_scope()->generator_object_var();
@@ -1115,81 +1128,19 @@ void BytecodeGenerator::AllocateTopLevelRegisters() {
}
}
-void BytecodeGenerator::VisitIterationHeader(IterationStatement* stmt,
- LoopBuilder* loop_builder) {
- VisitIterationHeader(stmt->first_suspend_id(), stmt->suspend_count(),
- loop_builder);
-}
-
-void BytecodeGenerator::VisitIterationHeader(int first_suspend_id,
- int suspend_count,
- LoopBuilder* loop_builder) {
- // Recall that suspend_count is always zero inside ordinary (i.e.
- // non-generator) functions.
- if (suspend_count == 0) {
- loop_builder->LoopHeader();
- } else {
- loop_builder->LoopHeaderInGenerator(&generator_jump_table_,
- first_suspend_id, suspend_count);
-
- // Perform state dispatch on the generator state, assuming this is a resume.
- builder()
- ->LoadAccumulatorWithRegister(generator_state_)
- .SwitchOnSmiNoFeedback(generator_jump_table_);
-
- // We fall through when the generator state is not in the jump table. If we
- // are not resuming, we want to fall through to the loop body.
- // TODO(leszeks): Only generate this test for debug builds, we can skip it
- // entirely in release assuming that the generator states is always valid.
- BytecodeLabel not_resuming;
- builder()
- ->LoadLiteral(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))
- .CompareOperation(Token::Value::EQ_STRICT, generator_state_)
- .JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &not_resuming);
-
- // Otherwise this is an error.
- builder()->Abort(AbortReason::kInvalidJumpTableIndex);
-
- builder()->Bind(&not_resuming);
- }
-}
-
void BytecodeGenerator::BuildGeneratorPrologue() {
DCHECK_GT(info()->literal()->suspend_count(), 0);
- DCHECK(generator_state_.is_valid());
DCHECK(generator_object().is_valid());
generator_jump_table_ =
builder()->AllocateJumpTable(info()->literal()->suspend_count(), 0);
- BytecodeLabel regular_call;
- builder()
- ->LoadAccumulatorWithRegister(generator_object())
- .JumpIfUndefined(&regular_call);
-
- // This is a resume call. Restore the current context and the registers,
- // then perform state dispatch.
- {
- RegisterAllocationScope register_scope(this);
- Register generator_context = register_allocator()->NewRegister();
- builder()
- ->CallRuntime(Runtime::kInlineGeneratorGetContext, generator_object())
- .PushContext(generator_context)
- .RestoreGeneratorState(generator_object())
- .StoreAccumulatorInRegister(generator_state_)
- .SwitchOnSmiNoFeedback(generator_jump_table_);
- }
- // We fall through when the generator state is not in the jump table.
- // TODO(leszeks): Only generate this for debug builds.
- builder()->Abort(AbortReason::kInvalidJumpTableIndex);
+ // If the generator is not undefined, this is a resume, so perform state
+ // dispatch.
+ builder()->SwitchOnGeneratorState(generator_object(), generator_jump_table_);
- // This is a regular call.
- builder()
- ->Bind(&regular_call)
- .LoadLiteral(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))
- .StoreAccumulatorInRegister(generator_state_);
- // Now fall through to the ordinary function prologue, after which we will run
- // into the generator object creation and other extra code inserted by the
- // parser.
+  // Otherwise, fall through to the ordinary function prologue, after which we
+ // will run into the generator object creation and other extra code inserted
+ // by the parser.
}
void BytecodeGenerator::VisitBlock(Block* stmt) {
@@ -1274,6 +1225,7 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
FeedbackSlot literal_slot = GetCachedCreateClosureSlot(decl->fun());
globals_builder()->AddFunctionDeclaration(variable->raw_name(), slot,
literal_slot, decl->fun());
+ AddToEagerLiteralsIfEager(decl->fun());
break;
}
case VariableLocation::PARAMETER:
@@ -1306,6 +1258,8 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
BuildVariableAssignment(variable, Token::INIT, HoleCheckMode::kElided);
break;
}
+ DCHECK_IMPLIES(decl->fun()->ShouldEagerCompile(),
+ IsInEagerLiterals(decl->fun(), *eager_inner_literals_));
}
void BytecodeGenerator::VisitModuleNamespaceImports() {
@@ -1505,11 +1459,11 @@ void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
if (stmt->cond()->ToBooleanIsFalse()) {
VisitIterationBody(stmt, &loop_builder);
} else if (stmt->cond()->ToBooleanIsTrue()) {
- VisitIterationHeader(stmt, &loop_builder);
+ loop_builder.LoopHeader();
VisitIterationBody(stmt, &loop_builder);
loop_builder.JumpToHeader(loop_depth_);
} else {
- VisitIterationHeader(stmt, &loop_builder);
+ loop_builder.LoopHeader();
VisitIterationBody(stmt, &loop_builder);
builder()->SetExpressionAsStatementPosition(stmt->cond());
BytecodeLabels loop_backbranch(zone());
@@ -1528,7 +1482,7 @@ void BytecodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
return;
}
- VisitIterationHeader(stmt, &loop_builder);
+ loop_builder.LoopHeader();
if (!stmt->cond()->ToBooleanIsTrue()) {
builder()->SetExpressionAsStatementPosition(stmt->cond());
BytecodeLabels loop_body(zone());
@@ -1552,7 +1506,7 @@ void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
return;
}
- VisitIterationHeader(stmt, &loop_builder);
+ loop_builder.LoopHeader();
if (stmt->cond() && !stmt->cond()->ToBooleanIsTrue()) {
builder()->SetExpressionAsStatementPosition(stmt->cond());
BytecodeLabels loop_body(zone());
@@ -1670,7 +1624,7 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// The loop
{
LoopBuilder loop_builder(builder(), block_coverage_builder_, stmt);
- VisitIterationHeader(stmt, &loop_builder);
+ loop_builder.LoopHeader();
builder()->SetExpressionAsStatementPosition(stmt->each());
builder()->ForInContinue(index, cache_length);
loop_builder.BreakIfFalse(ToBooleanMode::kAlreadyBoolean);
@@ -1694,7 +1648,7 @@ void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
VisitForEffect(stmt->assign_iterator());
VisitForEffect(stmt->assign_next());
- VisitIterationHeader(stmt, &loop_builder);
+ loop_builder.LoopHeader();
builder()->SetExpressionAsStatementPosition(stmt->next_result());
VisitForEffect(stmt->next_result());
TypeHint type_hint = VisitForAccumulatorValue(stmt->result_done());
@@ -1832,6 +1786,14 @@ void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
FeedbackSlot slot = GetCachedCreateClosureSlot(expr);
builder()->CreateClosure(entry, feedback_index(slot), flags);
function_literals_.push_back(std::make_pair(expr, entry));
+ AddToEagerLiteralsIfEager(expr);
+}
+
+void BytecodeGenerator::AddToEagerLiteralsIfEager(FunctionLiteral* literal) {
+ if (eager_inner_literals_ && literal->ShouldEagerCompile()) {
+ DCHECK(!IsInEagerLiterals(literal, *eager_inner_literals_));
+ eager_inner_literals_->push_back(literal);
+ }
}
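AddToEagerLiteralsIfEager collects inner function literals that are flagged for eager compilation into a caller-provided vector as the generator visits them. A toy version of that collection pattern (invented names, not the real AST walk):

#include <cstdio>
#include <string>
#include <vector>

struct FunctionLiteral {
  std::string name;
  bool should_eager_compile;
};

class Generator {
 public:
  explicit Generator(std::vector<FunctionLiteral*>* eager_inner_literals)
      : eager_inner_literals_(eager_inner_literals) {}

  void VisitFunctionLiteral(FunctionLiteral* literal) {
    // ...emit closure-creation bytecode for the literal...
    AddToEagerLiteralsIfEager(literal);
  }

 private:
  void AddToEagerLiteralsIfEager(FunctionLiteral* literal) {
    // Only record literals when a collection vector was supplied.
    if (eager_inner_literals_ && literal->should_eager_compile) {
      eager_inner_literals_->push_back(literal);
    }
  }

  std::vector<FunctionLiteral*>* eager_inner_literals_;
};

int main() {
  FunctionLiteral lazy{"lazy", false}, eager{"eager", true};
  std::vector<FunctionLiteral*> eager_literals;
  Generator gen(&eager_literals);
  gen.VisitFunctionLiteral(&lazy);
  gen.VisitFunctionLiteral(&eager);
  for (FunctionLiteral* f : eager_literals) std::printf("%s\n", f->name.c_str());
  return 0;
}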
void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) {
@@ -1867,6 +1829,7 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) {
for (int i = 0; i < expr->properties()->length(); i++) {
ClassLiteral::Property* property = expr->properties()->at(i);
if (property->is_computed_name()) {
+ DCHECK_NE(property->kind(), ClassLiteral::Property::PRIVATE_FIELD);
Register key = register_allocator()->GrowRegisterList(&args);
BuildLoadPropertyKey(property, key);
@@ -1884,7 +1847,7 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) {
.Bind(&done);
}
- if (property->kind() == ClassLiteral::Property::FIELD) {
+ if (property->kind() == ClassLiteral::Property::PUBLIC_FIELD) {
// Initialize field's name variable with the computed name.
DCHECK_NOT_NULL(property->computed_name_var());
builder()->LoadAccumulatorWithRegister(key);
@@ -1892,11 +1855,19 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) {
HoleCheckMode::kElided);
}
}
- if (property->kind() == ClassLiteral::Property::FIELD) {
+
+ if (property->kind() == ClassLiteral::Property::PUBLIC_FIELD) {
// We don't compute field's value here, but instead do it in the
// initializer function.
continue;
+ } else if (property->kind() == ClassLiteral::Property::PRIVATE_FIELD) {
+ builder()->CallRuntime(Runtime::kCreatePrivateFieldSymbol);
+ DCHECK_NOT_NULL(property->private_field_name_var());
+ BuildVariableAssignment(property->private_field_name_var(), Token::INIT,
+ HoleCheckMode::kElided);
+ continue;
}
+
Register value = register_allocator()->GrowRegisterList(&args);
VisitForRegisterValue(property->value(), value);
}
@@ -1976,12 +1947,18 @@ void BytecodeGenerator::VisitInitializeClassFieldsStatement(
ClassLiteral::Property* property = expr->fields()->at(i);
if (property->is_computed_name()) {
+ DCHECK_EQ(property->kind(), ClassLiteral::Property::PUBLIC_FIELD);
Variable* var = property->computed_name_var();
DCHECK_NOT_NULL(var);
// The computed name is already evaluated and stored in a
// variable at class definition time.
BuildVariableLoad(var, HoleCheckMode::kElided);
builder()->StoreAccumulatorInRegister(key);
+ } else if (property->kind() == ClassLiteral::Property::PRIVATE_FIELD) {
+ Variable* private_field_name_var = property->private_field_name_var();
+ DCHECK_NOT_NULL(private_field_name_var);
+ BuildVariableLoad(private_field_name_var, HoleCheckMode::kElided);
+ builder()->StoreAccumulatorInRegister(key);
} else {
BuildLoadPropertyKey(property, key);
}
@@ -1989,7 +1966,11 @@ void BytecodeGenerator::VisitInitializeClassFieldsStatement(
VisitForRegisterValue(property->value(), value);
VisitSetHomeObject(value, constructor, property);
- builder()->CallRuntime(Runtime::kCreateDataProperty, args);
+ Runtime::FunctionId function_id =
+ property->kind() == ClassLiteral::Property::PUBLIC_FIELD
+ ? Runtime::kCreateDataProperty
+ : Runtime::kAddPrivateField;
+ builder()->CallRuntime(function_id, args);
}
}
@@ -2140,7 +2121,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
UNREACHABLE();
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value()));
- // Fall through.
+ V8_FALLTHROUGH;
case ObjectLiteral::Property::COMPUTED: {
// It is safe to use [[Put]] here because the boilerplate already
// contains computed properties with an uninitialized value.
@@ -2563,7 +2544,7 @@ void BytecodeGenerator::BuildAsyncReturn(int source_position) {
BuildVariableLoad(var_promise, HoleCheckMode::kElided);
builder()
->StoreAccumulatorInRegister(promise)
- .CallJSRuntime(Context::PROMISE_RESOLVE_INDEX, args)
+ .CallRuntime(Runtime::kInlineResolvePromise, args)
.LoadAccumulatorWithRegister(promise);
}
@@ -2863,32 +2844,33 @@ void BytecodeGenerator::VisitCompoundAssignment(CompoundAssignment* expr) {
VisitAssignment(expr);
}
-// Suspends the generator to resume at |suspend_id|, with output stored in the
-// accumulator. When the generator is resumed, the sent value is loaded in the
-// accumulator.
-void BytecodeGenerator::BuildSuspendPoint(int suspend_id) {
+// Suspends the generator to resume at the next suspend_id, with output stored
+// in the accumulator. When the generator is resumed, the sent value is loaded
+// in the accumulator.
+void BytecodeGenerator::BuildSuspendPoint(Expression* suspend_expr) {
+ const int suspend_id = suspend_count_++;
+
RegisterList registers = register_allocator()->AllLiveRegisters();
- // Save context, registers, and state. Then return.
+ // Save context, registers, and state. This bytecode then returns the value
+ // in the accumulator.
+ builder()->SetExpressionPosition(suspend_expr);
builder()->SuspendGenerator(generator_object(), registers, suspend_id);
- builder()->SetReturnPosition(kNoSourcePosition, info()->literal());
- builder()->Return(); // Hard return (ignore any finally blocks).
-
// Upon resume, we continue here.
builder()->Bind(generator_jump_table_, suspend_id);
- // Clobbers all registers, updating the state to indicate that we have
- // finished resuming and setting the accumulator to the [[input_or_debug_pos]]
- // slot of the generator object.
- builder()->ResumeGenerator(generator_object(), generator_state_, registers);
+ // Clobbers all registers and sets the accumulator to the
+ // [[input_or_debug_pos]] slot of the generator object.
+ builder()->ResumeGenerator(generator_object(), registers);
}
void BytecodeGenerator::VisitYield(Yield* expr) {
builder()->SetExpressionPosition(expr);
VisitForAccumulatorValue(expr->expression());
- if (!expr->IsInitialYield()) {
+ // If this is not the first yield
+ if (suspend_count_ > 0) {
if (IsAsyncGeneratorFunction(function_kind())) {
// AsyncGenerator yields (with the exception of the initial yield)
// delegate work to the AsyncGeneratorYield stub, which Awaits the operand
@@ -2914,7 +2896,7 @@ void BytecodeGenerator::VisitYield(Yield* expr) {
}
}
- BuildSuspendPoint(expr->suspend_id());
+ BuildSuspendPoint(expr);
// At this point, the generator has been resumed, with the received value in
// the accumulator.
@@ -3053,10 +3035,16 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
// visible to the user, and we therefore neither pass the block coverage
// builder nor the expression.
//
- // YieldStar in AsyncGenerator functions includes 3 suspend points, rather
- // than 1. These are documented in the YieldStar AST node.
+ // In addition to the normal suspend for yield*, a yield* in an async
+ // generator has 2 additional suspends:
+ // - One for awaiting the iterator result of closing the generator when
+ // resumed with a "throw" completion, and a throw method is not
+ // present on the delegated iterator
+ // - One for awaiting the iterator result yielded by the delegated
+ // iterator
+
LoopBuilder loop(builder(), nullptr, nullptr);
- VisitIterationHeader(expr->suspend_id(), expr->suspend_count(), &loop);
+ loop.LoopHeader();
{
BytecodeLabels after_switch(zone());
@@ -3110,7 +3098,7 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
// If there is no "throw" method, perform IteratorClose, and finally
// throw a TypeError.
no_throw_method.Bind(builder());
- BuildIteratorClose(iterator, expr->await_iterator_close_suspend_id());
+ BuildIteratorClose(iterator, expr);
builder()->CallRuntime(Runtime::kThrowThrowMethodMissing);
}
@@ -3119,7 +3107,7 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
if (iterator_type == IteratorType::kAsync) {
// Await the result of the method invocation.
- BuildAwait(expr->await_delegated_iterator_output_suspend_id());
+ BuildAwait(expr);
}
// Check that output is an object.
@@ -3159,7 +3147,7 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
.CallRuntime(Runtime::kInlineAsyncGeneratorYield, args);
}
- BuildSuspendPoint(expr->suspend_id());
+ BuildSuspendPoint(expr);
builder()->StoreAccumulatorInRegister(input);
builder()
->CallRuntime(Runtime::kInlineGeneratorGetResumeMode,
@@ -3195,7 +3183,7 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
builder()->LoadAccumulatorWithRegister(output_value);
}
-void BytecodeGenerator::BuildAwait(int suspend_id) {
+void BytecodeGenerator::BuildAwait(Expression* await_expr) {
// Rather than HandlerTable::UNCAUGHT, async functions use
// HandlerTable::ASYNC_AWAIT to communicate that top-level exceptions are
// transformed into promise rejections. This is necessary to prevent emitting
@@ -3208,22 +3196,20 @@ void BytecodeGenerator::BuildAwait(int suspend_id) {
// Await(operand) and suspend.
RegisterAllocationScope register_scope(this);
- int await_builtin_context_index;
+ Runtime::FunctionId id;
RegisterList args;
if (IsAsyncGeneratorFunction(function_kind())) {
- await_builtin_context_index =
- catch_prediction() == HandlerTable::ASYNC_AWAIT
- ? Context::ASYNC_GENERATOR_AWAIT_UNCAUGHT
- : Context::ASYNC_GENERATOR_AWAIT_CAUGHT;
+ id = catch_prediction() == HandlerTable::ASYNC_AWAIT
+ ? Runtime::kInlineAsyncGeneratorAwaitUncaught
+ : Runtime::kInlineAsyncGeneratorAwaitCaught;
args = register_allocator()->NewRegisterList(2);
builder()
->MoveRegister(generator_object(), args[0])
.StoreAccumulatorInRegister(args[1]);
} else {
- await_builtin_context_index =
- catch_prediction() == HandlerTable::ASYNC_AWAIT
- ? Context::ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX
- : Context::ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX;
+ id = catch_prediction() == HandlerTable::ASYNC_AWAIT
+ ? Runtime::kInlineAsyncFunctionAwaitUncaught
+ : Runtime::kInlineAsyncFunctionAwaitCaught;
args = register_allocator()->NewRegisterList(3);
builder()
->MoveRegister(generator_object(), args[0])
@@ -3236,10 +3222,10 @@ void BytecodeGenerator::BuildAwait(int suspend_id) {
builder()->StoreAccumulatorInRegister(args[2]);
}
- builder()->CallJSRuntime(await_builtin_context_index, args);
+ builder()->CallRuntime(id, args);
}
- BuildSuspendPoint(suspend_id);
+ BuildSuspendPoint(await_expr);
Register input = register_allocator()->NewRegister();
Register resume_mode = register_allocator()->NewRegister();
@@ -3267,7 +3253,7 @@ void BytecodeGenerator::BuildAwait(int suspend_id) {
void BytecodeGenerator::VisitAwait(Await* expr) {
builder()->SetExpressionPosition(expr);
VisitForAccumulatorValue(expr->expression());
- BuildAwait(expr->suspend_id());
+ BuildAwait(expr);
BuildIncrementBlockCoverageCounterIfEnabled(expr,
SourceRangeKind::kContinuation);
}
@@ -3914,7 +3900,8 @@ void BytecodeGenerator::VisitNaryOperation(NaryOperation* expr) {
}
}
-void BytecodeGenerator::BuildLiteralCompareNil(Token::Value op, NilValue nil) {
+void BytecodeGenerator::BuildLiteralCompareNil(
+ Token::Value op, BytecodeArrayBuilder::NilValue nil) {
if (execution_result()->IsTest()) {
TestResultScope* test_result = execution_result()->AsTest();
switch (test_result->fallthrough()) {
@@ -3953,11 +3940,11 @@ void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
} else if (expr->IsLiteralCompareUndefined(&sub_expr)) {
VisitForAccumulatorValue(sub_expr);
builder()->SetExpressionPosition(expr);
- BuildLiteralCompareNil(expr->op(), kUndefinedValue);
+ BuildLiteralCompareNil(expr->op(), BytecodeArrayBuilder::kUndefinedValue);
} else if (expr->IsLiteralCompareNull(&sub_expr)) {
VisitForAccumulatorValue(sub_expr);
builder()->SetExpressionPosition(expr);
- BuildLiteralCompareNil(expr->op(), kNullValue);
+ BuildLiteralCompareNil(expr->op(), BytecodeArrayBuilder::kNullValue);
} else {
Register lhs = VisitForRegisterValue(expr->left());
VisitForAccumulatorValue(expr->right());
@@ -4154,7 +4141,7 @@ void BytecodeGenerator::BuildCallIteratorMethod(Register iterator,
}
void BytecodeGenerator::BuildIteratorClose(const IteratorRecord& iterator,
- int suspend_id) {
+ Expression* expr) {
RegisterAllocationScope register_scope(this);
BytecodeLabels done(zone());
BytecodeLabel if_called;
@@ -4165,8 +4152,8 @@ void BytecodeGenerator::BuildIteratorClose(const IteratorRecord& iterator,
builder()->Bind(&if_called);
if (iterator.type() == IteratorType::kAsync) {
- DCHECK_GE(suspend_id, 0);
- BuildAwait(suspend_id);
+ DCHECK_NOT_NULL(expr);
+ BuildAwait(expr);
}
builder()->JumpIfJSReceiver(done.New());
@@ -4190,7 +4177,8 @@ void BytecodeGenerator::VisitGetTemplateObject(GetTemplateObject* expr) {
builder()->SetExpressionPosition(expr);
size_t entry = builder()->AllocateDeferredConstantPoolEntry();
template_objects_.push_back(std::make_pair(expr, entry));
- builder()->GetTemplateObject(entry);
+ FeedbackSlot literal_slot = feedback_spec()->AddLiteralSlot();
+ builder()->GetTemplateObject(entry, feedback_index(literal_slot));
}
void BytecodeGenerator::VisitThisFunction(ThisFunction* expr) {
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index f9de9550fe..c96e5e9e83 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -28,8 +28,9 @@ class BytecodeJumpTable;
class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
public:
- explicit BytecodeGenerator(CompilationInfo* info,
- const AstStringConstants* ast_string_constants);
+ explicit BytecodeGenerator(
+ CompilationInfo* info, const AstStringConstants* ast_string_constants,
+ ZoneVector<FunctionLiteral*>* eager_inner_literals);
void GenerateBytecode(uintptr_t stack_limit);
Handle<BytecodeArray> FinalizeBytecode(Isolate* isolate,
@@ -126,7 +127,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildVariableAssignment(
Variable* variable, Token::Value op, HoleCheckMode hole_check_mode,
LookupHoistingMode lookup_hoisting_mode = LookupHoistingMode::kNormal);
- void BuildLiteralCompareNil(Token::Value compare_op, NilValue nil);
+ void BuildLiteralCompareNil(Token::Value compare_op,
+ BytecodeArrayBuilder::NilValue nil);
void BuildReturn(int source_position = kNoSourcePosition);
void BuildAsyncReturn(int source_position = kNoSourcePosition);
void BuildAsyncGeneratorReturn();
@@ -146,9 +148,9 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildNewLocalWithContext(Scope* scope);
void BuildGeneratorPrologue();
- void BuildSuspendPoint(int suspend_id);
+ void BuildSuspendPoint(Expression* suspend_expr);
- void BuildAwait(int suspend_id);
+ void BuildAwait(Expression* await_expr);
void BuildGetIterator(Expression* iterable, IteratorType hint);
@@ -164,7 +166,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
IteratorRecord BuildGetIteratorRecord(Expression* iterable,
IteratorType hint);
void BuildIteratorNext(const IteratorRecord& iterator, Register next_result);
- void BuildIteratorClose(const IteratorRecord& iterator, int suspend_id = -1);
+ void BuildIteratorClose(const IteratorRecord& iterator,
+ Expression* expr = nullptr);
void BuildCallIteratorMethod(Register iterator, const AstRawString* method,
RegisterList receiver_and_args,
BytecodeLabel* if_called,
@@ -212,11 +215,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
BytecodeLabels* end_labels,
int coverage_slot);
- // Visit the header/body of a loop iteration.
- void VisitIterationHeader(IterationStatement* stmt,
- LoopBuilder* loop_builder);
- void VisitIterationHeader(int first_suspend_id, int suspend_count,
- LoopBuilder* loop_builder);
+ // Visit the body of a loop iteration.
void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop_builder);
// Visit a statement and switch scopes, the context is in the accumulator.
@@ -263,6 +262,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
Variable* variable);
FeedbackSlot GetCachedCreateClosureSlot(FunctionLiteral* literal);
+ void AddToEagerLiteralsIfEager(FunctionLiteral* literal);
+
static constexpr ToBooleanMode ToBooleanModeFromTypeHint(TypeHint type_hint) {
return type_hint == TypeHint::kBoolean ? ToBooleanMode::kAlreadyBoolean
: ToBooleanMode::kConvertToBoolean;
@@ -324,6 +325,9 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
DeclarationScope* closure_scope_;
Scope* current_scope_;
+ // External vector of literals to be eagerly compiled.
+ ZoneVector<FunctionLiteral*>* eager_inner_literals_;
+
FeedbackSlotCache* feedback_slot_cache_;
GlobalDeclarationsBuilder* globals_builder_;
@@ -344,7 +348,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
Register incoming_new_target_or_generator_;
BytecodeJumpTable* generator_jump_table_;
- Register generator_state_;
+ int suspend_count_;
int loop_depth_;
HandlerTable::CatchPrediction catch_prediction_;
diff --git a/deps/v8/src/interpreter/bytecodes.cc b/deps/v8/src/interpreter/bytecodes.cc
index 61173a8341..88cdae6ce5 100644
--- a/deps/v8/src/interpreter/bytecodes.cc
+++ b/deps/v8/src/interpreter/bytecodes.cc
@@ -200,6 +200,17 @@ bool Bytecodes::IsRegisterOperandType(OperandType operand_type) {
return false;
}
+// static
+bool Bytecodes::IsRegisterListOperandType(OperandType operand_type) {
+ switch (operand_type) {
+ case OperandType::kRegList:
+ case OperandType::kRegOutList:
+ return true;
+ default:
+ return false;
+ }
+}
+
bool Bytecodes::MakesCallAlongCriticalPath(Bytecode bytecode) {
if (IsCallOrConstruct(bytecode) || IsCallRuntime(bytecode)) return true;
switch (bytecode) {
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index ce01566d52..293c0562e9 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -233,7 +233,8 @@ namespace interpreter {
V(CreateEmptyObjectLiteral, AccumulatorUse::kWrite) \
\
/* Tagged templates */ \
- V(GetTemplateObject, AccumulatorUse::kWrite, OperandType::kIdx) \
+ V(GetTemplateObject, AccumulatorUse::kWrite, OperandType::kIdx, \
+ OperandType::kIdx) \
\
/* Closure allocation */ \
V(CreateClosure, AccumulatorUse::kWrite, OperandType::kIdx, \
@@ -314,11 +315,12 @@ namespace interpreter {
V(ThrowSuperAlreadyCalledIfNotHole, AccumulatorUse::kRead) \
\
/* Generators */ \
- V(RestoreGeneratorState, AccumulatorUse::kWrite, OperandType::kReg) \
- V(SuspendGenerator, AccumulatorUse::kNone, OperandType::kReg, \
+ V(SwitchOnGeneratorState, AccumulatorUse::kNone, OperandType::kReg, \
+ OperandType::kIdx, OperandType::kUImm) \
+ V(SuspendGenerator, AccumulatorUse::kRead, OperandType::kReg, \
OperandType::kRegList, OperandType::kRegCount, OperandType::kUImm) \
V(ResumeGenerator, AccumulatorUse::kWrite, OperandType::kReg, \
- OperandType::kRegOut, OperandType::kRegOutList, OperandType::kRegCount) \
+ OperandType::kRegOutList, OperandType::kRegCount) \
\
/* Debugger */ \
V(Debugger, AccumulatorUse::kNone) \
@@ -432,6 +434,10 @@ namespace interpreter {
JUMP_FORWARD_BYTECODE_LIST(V) \
V(JumpLoop)
+#define RETURN_BYTECODE_LIST(V) \
+ V(Return) \
+ V(SuspendGenerator)
+
// Enumeration of interpreter bytecodes.
enum class Bytecode : uint8_t {
#define DECLARE_BYTECODE(Name, ...) k##Name,
@@ -613,11 +619,6 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
bytecode <= Bytecode::kJumpIfJSReceiver;
}
- // Returns true if the bytecode is a conditional jump, a jump, or a return.
- static constexpr bool IsJumpOrReturn(Bytecode bytecode) {
- return bytecode == Bytecode::kReturn || IsJump(bytecode);
- }
-
// Return true if |bytecode| is a jump without effects,
// e.g. any jump excluding those that include type coercion like
// JumpIfTrueToBoolean.
@@ -627,7 +628,8 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
// Returns true if the bytecode is a switch.
static constexpr bool IsSwitch(Bytecode bytecode) {
- return bytecode == Bytecode::kSwitchOnSmiNoFeedback;
+ return bytecode == Bytecode::kSwitchOnSmiNoFeedback ||
+ bytecode == Bytecode::kSwitchOnGeneratorState;
}
// Returns true if |bytecode| has no effects. These bytecodes only manipulate
@@ -681,9 +683,16 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
return true;
}
+ // Returns true if the bytecode returns.
+ static constexpr bool Returns(Bytecode bytecode) {
+#define OR_BYTECODE(NAME) || bytecode == Bytecode::k##NAME
+ return false RETURN_BYTECODE_LIST(OR_BYTECODE);
+#undef OR_BYTECODE
+ }
+
// Returns the number of values which |bytecode| returns.
static constexpr size_t ReturnCount(Bytecode bytecode) {
- return bytecode == Bytecode::kReturn ? 1 : 0;
+ return Returns(bytecode) ? 1 : 0;
}
// Returns the number of operands expected by |bytecode|.
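Returns() is built by seeding the expression with `false` and letting RETURN_BYTECODE_LIST paste one `|| bytecode == Bytecode::k<Name>` clause per entry, so new returning bytecodes only need a list addition. A self-contained illustration of that X-macro expansion (toy enum, not the real bytecode list):

#include <cstdio>

enum class Bytecode { kAdd, kJump, kReturn, kSuspendGenerator };

// Each list entry expands V(...) once.
#define RETURN_BYTECODE_LIST(V) \
  V(Return)                     \
  V(SuspendGenerator)

constexpr bool Returns(Bytecode bytecode) {
#define OR_BYTECODE(NAME) || bytecode == Bytecode::k##NAME
  // Expands to: return false || bytecode == Bytecode::kReturn
  //                          || bytecode == Bytecode::kSuspendGenerator;
  return false RETURN_BYTECODE_LIST(OR_BYTECODE);
#undef OR_BYTECODE
}

int main() {
  static_assert(Returns(Bytecode::kReturn), "Return is a returning bytecode");
  static_assert(Returns(Bytecode::kSuspendGenerator), "so is SuspendGenerator");
  static_assert(!Returns(Bytecode::kAdd), "Add is not");
  std::printf("kJump returns? %d\n", Returns(Bytecode::kJump));  // 0
  return 0;
}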
@@ -812,6 +821,9 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
// Returns true if |operand_type| represents a register used as an output.
static bool IsRegisterOutputOperandType(OperandType operand_type);
+ // Returns true if |operand_type| represents a register list operand.
+ static bool IsRegisterListOperandType(OperandType operand_type);
+
// Returns true if the handler for |bytecode| should look ahead and inline a
// dispatch to a Star bytecode.
static bool IsStarLookahead(Bytecode bytecode, OperandScale operand_scale);
diff --git a/deps/v8/src/interpreter/control-flow-builders.cc b/deps/v8/src/interpreter/control-flow-builders.cc
index ea316f286f..bada935e4a 100644
--- a/deps/v8/src/interpreter/control-flow-builders.cc
+++ b/deps/v8/src/interpreter/control-flow-builders.cc
@@ -47,10 +47,6 @@ void BreakableControlFlowBuilder::EmitJumpIfNull(BytecodeLabels* sites) {
LoopBuilder::~LoopBuilder() {
DCHECK(continue_labels_.empty() || continue_labels_.is_bound());
- // Restore the parent jump table.
- if (generator_jump_table_location_ != nullptr) {
- *generator_jump_table_location_ = parent_generator_jump_table_;
- }
}
void LoopBuilder::LoopHeader() {
@@ -62,26 +58,6 @@ void LoopBuilder::LoopHeader() {
builder()->Bind(&loop_header_);
}
-void LoopBuilder::LoopHeaderInGenerator(
- BytecodeJumpTable** generator_jump_table, int first_resume_id,
- int resume_count) {
- // Bind all the resume points that are inside the loop to be at the loop
- // header.
- for (int id = first_resume_id; id < first_resume_id + resume_count; ++id) {
- builder()->Bind(*generator_jump_table, id);
- }
-
- // Create the loop header.
- LoopHeader();
-
- // Create a new jump table for after the loop header for only these
- // resume points.
- generator_jump_table_location_ = generator_jump_table;
- parent_generator_jump_table_ = *generator_jump_table;
- *generator_jump_table =
- builder()->AllocateJumpTable(resume_count, first_resume_id);
-}
-
void LoopBuilder::LoopBody() {
if (block_coverage_builder_ != nullptr) {
block_coverage_builder_->IncrementBlockCounter(block_coverage_body_slot_);
diff --git a/deps/v8/src/interpreter/control-flow-builders.h b/deps/v8/src/interpreter/control-flow-builders.h
index 4a81b1f205..405e81bc76 100644
--- a/deps/v8/src/interpreter/control-flow-builders.h
+++ b/deps/v8/src/interpreter/control-flow-builders.h
@@ -105,9 +105,7 @@ class V8_EXPORT_PRIVATE LoopBuilder final : public BreakableControlFlowBuilder {
LoopBuilder(BytecodeArrayBuilder* builder,
BlockCoverageBuilder* block_coverage_builder, AstNode* node)
: BreakableControlFlowBuilder(builder, block_coverage_builder, node),
- continue_labels_(builder->zone()),
- generator_jump_table_location_(nullptr),
- parent_generator_jump_table_(nullptr) {
+ continue_labels_(builder->zone()) {
if (block_coverage_builder_ != nullptr) {
set_needs_continuation_counter();
block_coverage_body_slot_ =
@@ -118,8 +116,6 @@ class V8_EXPORT_PRIVATE LoopBuilder final : public BreakableControlFlowBuilder {
~LoopBuilder();
void LoopHeader();
- void LoopHeaderInGenerator(BytecodeJumpTable** parent_generator_jump_table,
- int first_resume_id, int resume_count);
void LoopBody();
void JumpToHeader(int loop_depth);
void BindContinueTarget();
@@ -138,13 +134,6 @@ class V8_EXPORT_PRIVATE LoopBuilder final : public BreakableControlFlowBuilder {
// jumps from checking the loop condition to the header for do-while loops.
BytecodeLabels continue_labels_;
- // While we're in the loop, we want to have a different jump table for
- // generator switch statements. We restore it at the end of the loop.
- // TODO(leszeks): Storing a pointer to the BytecodeGenerator's jump table
- // field is ugly, figure out a better way to do this.
- BytecodeJumpTable** generator_jump_table_location_;
- BytecodeJumpTable* parent_generator_jump_table_;
-
int block_coverage_body_slot_;
};
diff --git a/deps/v8/src/interpreter/handler-table-builder.cc b/deps/v8/src/interpreter/handler-table-builder.cc
index 4b6c44b95d..93db1e969a 100644
--- a/deps/v8/src/interpreter/handler-table-builder.cc
+++ b/deps/v8/src/interpreter/handler-table-builder.cc
@@ -15,20 +15,20 @@ namespace interpreter {
HandlerTableBuilder::HandlerTableBuilder(Zone* zone) : entries_(zone) {}
-Handle<HandlerTable> HandlerTableBuilder::ToHandlerTable(Isolate* isolate) {
+Handle<ByteArray> HandlerTableBuilder::ToHandlerTable(Isolate* isolate) {
int handler_table_size = static_cast<int>(entries_.size());
- Handle<HandlerTable> table =
- Handle<HandlerTable>::cast(isolate->factory()->NewFixedArray(
- HandlerTable::LengthForRange(handler_table_size), TENURED));
+ Handle<ByteArray> table_byte_array = isolate->factory()->NewByteArray(
+ HandlerTable::LengthForRange(handler_table_size), TENURED);
+ HandlerTable table(*table_byte_array);
for (int i = 0; i < handler_table_size; ++i) {
Entry& entry = entries_[i];
HandlerTable::CatchPrediction pred = entry.catch_prediction_;
- table->SetRangeStart(i, static_cast<int>(entry.offset_start));
- table->SetRangeEnd(i, static_cast<int>(entry.offset_end));
- table->SetRangeHandler(i, static_cast<int>(entry.offset_target), pred);
- table->SetRangeData(i, entry.context.index());
+ table.SetRangeStart(i, static_cast<int>(entry.offset_start));
+ table.SetRangeEnd(i, static_cast<int>(entry.offset_end));
+ table.SetRangeHandler(i, static_cast<int>(entry.offset_target), pred);
+ table.SetRangeData(i, entry.context.index());
}
- return table;
+ return table_byte_array;
}
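ToHandlerTable now fills a raw ByteArray through the HandlerTable view instead of building a tagged FixedArray. A rough standalone sketch of the same idea — packing fixed-width range entries into a flat byte buffer — with a field layout invented for illustration (the real HandlerTable encoding differs):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// One try-range entry: [start, end) with a handler offset and extra data.
struct Entry {
  uint32_t start, end, handler, data;
};
constexpr size_t kEntrySize = sizeof(Entry);  // Four 32-bit fields, no padding.

std::vector<uint8_t> ToHandlerTable(const std::vector<Entry>& entries) {
  std::vector<uint8_t> table(entries.size() * kEntrySize);
  for (size_t i = 0; i < entries.size(); ++i) {
    // Write entry i as a fixed-width record at its computed byte offset.
    std::memcpy(table.data() + i * kEntrySize, &entries[i], kEntrySize);
  }
  return table;
}

int main() {
  std::vector<Entry> entries = {{0, 10, 12, 0}, {20, 30, 32, 1}};
  std::vector<uint8_t> table = ToHandlerTable(entries);
  Entry back;
  std::memcpy(&back, table.data() + kEntrySize, kEntrySize);
  std::printf("entry 1: [%u, %u) -> handler %u\n", back.start, back.end,
              back.handler);
  return 0;
}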
diff --git a/deps/v8/src/interpreter/handler-table-builder.h b/deps/v8/src/interpreter/handler-table-builder.h
index 381606f98b..021fefad29 100644
--- a/deps/v8/src/interpreter/handler-table-builder.h
+++ b/deps/v8/src/interpreter/handler-table-builder.h
@@ -27,7 +27,7 @@ class V8_EXPORT_PRIVATE HandlerTableBuilder final BASE_EMBEDDED {
// Builds the actual handler table by copying the current values into a heap
// object. Any further mutations to the builder won't be reflected.
- Handle<HandlerTable> ToHandlerTable(Isolate* isolate);
+ Handle<ByteArray> ToHandlerTable(Isolate* isolate);
// Creates a new handler table entry and returns a {handler_id} identifying the
// entry, so that it can be referenced by the setter functions below.
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index 846b69281e..b2c4ba2309 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -48,6 +48,8 @@ InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
made_call_(false),
reloaded_frame_ptr_(false),
bytecode_array_valid_(true),
+ speculation_poison_(FLAG_untrusted_code_mitigations ? SpeculationPoison()
+ : nullptr),
disable_stack_check_across_call_(false),
stack_pointer_before_call_(nullptr) {
#ifdef V8_TRACE_IGNITION
@@ -59,7 +61,7 @@ InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
// Save the bytecode offset immediately if bytecode will make a call along the
// critical path, or it is a return bytecode.
if (Bytecodes::MakesCallAlongCriticalPath(bytecode) ||
- bytecode_ == Bytecode::kReturn) {
+ Bytecodes::Returns(bytecode)) {
SaveBytecodeOffset();
}
}
@@ -72,6 +74,24 @@ InterpreterAssembler::~InterpreterAssembler() {
UnregisterCallGenerationCallbacks();
}
+Node* InterpreterAssembler::PoisonOnSpeculationTagged(Node* value) {
+ if (speculation_poison_ == nullptr) return value;
+ return BitcastWordToTagged(
+ WordAnd(speculation_poison_, BitcastTaggedToWord(value)));
+}
+
+Node* InterpreterAssembler::PoisonOnSpeculationWord(Node* value) {
+ if (speculation_poison_ == nullptr) return value;
+ return WordAnd(speculation_poison_, value);
+}
+
+Node* InterpreterAssembler::PoisonOnSpeculationInt32(Node* value) {
+ if (speculation_poison_ == nullptr) return value;
+ Node* truncated_speculation_poison =
+ Is64() ? TruncateInt64ToInt32(speculation_poison_) : speculation_poison_;
+ return Word32And(truncated_speculation_poison, value);
+}
+
Node* InterpreterAssembler::GetInterpretedFramePointer() {
if (!interpreted_frame_pointer_.IsBound()) {
interpreted_frame_pointer_.Bind(LoadParentFramePointer());
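The PoisonOnSpeculation* helpers added above all reduce to one idea: AND the value with a mask that is all ones on the architecturally valid path and all zeros on a mispredicted path, so speculatively loaded data cannot feed a later, cache-observable access. A minimal sketch of that masking pattern with plain integers instead of CSA nodes (how V8 actually derives the poison mask is out of scope here):

// Sketch of conditional speculation poisoning by masking.
#include <cstdint>
#include <cstdio>

uint64_t PoisonOnSpeculationWord(uint64_t poison, uint64_t value) {
  return poison & value;  // unchanged on the valid path, zero when poisoned
}

uint32_t PoisonOnSpeculationInt32(uint64_t poison, uint32_t value) {
  // On 64-bit targets the mask is truncated before masking a 32-bit value.
  return static_cast<uint32_t>(poison) & value;
}

int main() {
  const uint64_t kValidPath = ~uint64_t{0};     // mask on the correct path
  const uint64_t kMispredicted = uint64_t{0};   // mask on a mispredicted path
  uint64_t secret_index = 0x1234;
  std::printf("valid:        %llx\n",
              (unsigned long long)PoisonOnSpeculationWord(kValidPath, secret_index));
  std::printf("mispredicted: %llx\n",
              (unsigned long long)PoisonOnSpeculationWord(kMispredicted, secret_index));
  (void)PoisonOnSpeculationInt32(kValidPath, 7u);
  return 0;
}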
@@ -139,7 +159,7 @@ Node* InterpreterAssembler::GetAccumulatorUnchecked() {
Node* InterpreterAssembler::GetAccumulator() {
DCHECK(Bytecodes::ReadsAccumulator(bytecode_));
accumulator_use_ = accumulator_use_ | AccumulatorUse::kRead;
- return GetAccumulatorUnchecked();
+ return PoisonOnSpeculationTagged(GetAccumulatorUnchecked());
}
void InterpreterAssembler::SetAccumulator(Node* value) {
@@ -222,22 +242,27 @@ void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(Node* context,
}
Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
- return IntPtrAdd(GetInterpretedFramePointer(),
- RegisterFrameOffset(reg_index));
+ return PoisonOnSpeculationWord(
+ IntPtrAdd(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index)));
+}
+
+Node* InterpreterAssembler::RegisterLocation(Register reg) {
+ return RegisterLocation(IntPtrConstant(reg.ToOperand()));
}
Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
return TimesPointerSize(index);
}
-Node* InterpreterAssembler::LoadRegister(Register reg) {
- return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
- IntPtrConstant(reg.ToOperand() << kPointerSizeLog2));
+Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
+ Node* value = Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
+ RegisterFrameOffset(reg_index));
+ return PoisonOnSpeculationTagged(value);
}
-Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
+Node* InterpreterAssembler::LoadRegister(Register reg) {
return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
- RegisterFrameOffset(reg_index));
+ IntPtrConstant(reg.ToOperand() << kPointerSizeLog2));
}
Node* InterpreterAssembler::LoadAndUntagRegister(Register reg) {
@@ -245,22 +270,92 @@ Node* InterpreterAssembler::LoadAndUntagRegister(Register reg) {
<< kPointerSizeLog2);
}
-Node* InterpreterAssembler::StoreRegister(Node* value, Register reg) {
- return StoreNoWriteBarrier(
+Node* InterpreterAssembler::LoadRegisterAtOperandIndex(int operand_index) {
+ return LoadRegister(BytecodeOperandRegUnpoisoned(operand_index));
+}
+
+std::pair<Node*, Node*> InterpreterAssembler::LoadRegisterPairAtOperandIndex(
+ int operand_index) {
+ DCHECK_EQ(OperandType::kRegPair,
+ Bytecodes::GetOperandType(bytecode_, operand_index));
+ Node* first_reg_index = BytecodeOperandRegUnpoisoned(operand_index);
+ Node* second_reg_index = NextRegister(first_reg_index);
+ return std::make_pair(LoadRegister(first_reg_index),
+ LoadRegister(second_reg_index));
+}
+
+InterpreterAssembler::RegListNodePair
+InterpreterAssembler::GetRegisterListAtOperandIndex(int operand_index) {
+ DCHECK(Bytecodes::IsRegisterListOperandType(
+ Bytecodes::GetOperandType(bytecode_, operand_index)));
+ DCHECK_EQ(OperandType::kRegCount,
+ Bytecodes::GetOperandType(bytecode_, operand_index + 1));
+ Node* base_reg =
+ RegisterLocation(BytecodeOperandRegUnpoisoned(operand_index));
+ Node* reg_count = BytecodeOperandCount(operand_index + 1);
+ return RegListNodePair(base_reg, reg_count);
+}
+
+Node* InterpreterAssembler::LoadRegisterFromRegisterList(
+ const RegListNodePair& reg_list, int index) {
+ Node* location = RegisterLocationInRegisterList(reg_list, index);
+ // Location is already poisoned on speculation, so no need to poison here.
+ return Load(MachineType::AnyTagged(), location);
+}
+
+Node* InterpreterAssembler::RegisterLocationInRegisterList(
+ const RegListNodePair& reg_list, int index) {
+ CSA_ASSERT(this,
+ Uint32GreaterThan(reg_list.reg_count(), Int32Constant(index)));
+ Node* offset = RegisterFrameOffset(IntPtrConstant(index));
+ // Register indexes are negative, so subtract index from base location to get
+ // location.
+ return IntPtrSub(reg_list.base_reg_location(), offset);
+}
+
+void InterpreterAssembler::StoreRegister(Node* value, Register reg) {
+ StoreNoWriteBarrier(
MachineRepresentation::kTagged, GetInterpretedFramePointer(),
IntPtrConstant(reg.ToOperand() << kPointerSizeLog2), value);
}
-Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
- return StoreNoWriteBarrier(MachineRepresentation::kTagged,
- GetInterpretedFramePointer(),
- RegisterFrameOffset(reg_index), value);
+void InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
+ StoreNoWriteBarrier(MachineRepresentation::kTagged,
+ GetInterpretedFramePointer(),
+ RegisterFrameOffset(reg_index), value);
}
-Node* InterpreterAssembler::StoreAndTagRegister(compiler::Node* value,
- Register reg) {
+void InterpreterAssembler::StoreAndTagRegister(Node* value, Register reg) {
int offset = reg.ToOperand() << kPointerSizeLog2;
- return StoreAndTagSmi(GetInterpretedFramePointer(), offset, value);
+ StoreAndTagSmi(GetInterpretedFramePointer(), offset, value);
+}
+
+void InterpreterAssembler::StoreRegisterAtOperandIndex(Node* value,
+ int operand_index) {
+ StoreRegister(value, BytecodeOperandRegUnpoisoned(operand_index));
+}
+
+void InterpreterAssembler::StoreRegisterPairAtOperandIndex(Node* value1,
+ Node* value2,
+ int operand_index) {
+ DCHECK_EQ(OperandType::kRegOutPair,
+ Bytecodes::GetOperandType(bytecode_, operand_index));
+ Node* first_reg_index = BytecodeOperandRegUnpoisoned(operand_index);
+ StoreRegister(value1, first_reg_index);
+ Node* second_reg_index = NextRegister(first_reg_index);
+ StoreRegister(value2, second_reg_index);
+}
+
+void InterpreterAssembler::StoreRegisterTripleAtOperandIndex(
+ Node* value1, Node* value2, Node* value3, int operand_index) {
+ DCHECK_EQ(OperandType::kRegOutTriple,
+ Bytecodes::GetOperandType(bytecode_, operand_index));
+ Node* first_reg_index = BytecodeOperandRegUnpoisoned(operand_index);
+ StoreRegister(value1, first_reg_index);
+ Node* second_reg_index = NextRegister(first_reg_index);
+ StoreRegister(value2, second_reg_index);
+ Node* third_reg_index = NextRegister(second_reg_index);
+ StoreRegister(value3, third_reg_index);
}
Node* InterpreterAssembler::NextRegister(Node* reg_index) {
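The register helpers above compute locations relative to the interpreted frame pointer: a register operand is a (usually negative) word index, and the i-th register of a register list sits one pointer below the previous one, which is why RegisterLocationInRegisterList subtracts the offset from the base location. A small sketch with an ordinary array standing in for the frame (only the addressing arithmetic is illustrated; operand encodings are V8-internal):

// Sketch: frame-relative register addressing and downward-growing register lists.
#include <cstdint>
#include <cstdio>

intptr_t* RegisterLocation(intptr_t* fp, int reg_index) {
  return fp + reg_index;  // reg_index is typically negative
}

intptr_t* RegisterLocationInRegisterList(intptr_t* base, int i) {
  return base - i;  // successive list registers live at lower addresses
}

int main() {
  intptr_t frame[16] = {};
  intptr_t* fp = frame + 8;                          // pretend frame pointer
  intptr_t* r0 = RegisterLocation(fp, -1);           // first register of a list
  intptr_t* r1 = RegisterLocationInRegisterList(r0, 1);
  *r0 = 42;
  *r1 = 43;
  std::printf("r0=%ld r1=%ld, %d bytes apart\n", (long)*r0, (long)*r1,
              (int)((char*)r0 - (char*)r1));
  return 0;
}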
@@ -273,7 +368,8 @@ Node* InterpreterAssembler::OperandOffset(int operand_index) {
Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()));
}
-Node* InterpreterAssembler::BytecodeOperandUnsignedByte(int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandUnsignedByteUnpoisoned(
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
@@ -282,7 +378,8 @@ Node* InterpreterAssembler::BytecodeOperandUnsignedByte(int operand_index) {
IntPtrAdd(BytecodeOffset(), operand_offset));
}
-Node* InterpreterAssembler::BytecodeOperandSignedByte(int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandSignedByteUnpoisoned(
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
@@ -291,7 +388,7 @@ Node* InterpreterAssembler::BytecodeOperandSignedByte(int operand_index) {
IntPtrAdd(BytecodeOffset(), operand_offset));
}
-compiler::Node* InterpreterAssembler::BytecodeOperandReadUnaligned(
+Node* InterpreterAssembler::BytecodeOperandReadUnalignedUnpoisoned(
int relative_offset, MachineType result_type) {
static const int kMaxCount = 4;
DCHECK(!TargetSupportsUnalignedAccess());
@@ -324,7 +421,7 @@ compiler::Node* InterpreterAssembler::BytecodeOperandReadUnaligned(
// Read the most significant byte into bytes[0] and then in order
// down to the least significant in bytes[count - 1].
DCHECK_LE(count, kMaxCount);
- compiler::Node* bytes[kMaxCount];
+ Node* bytes[kMaxCount];
for (int i = 0; i < count; i++) {
MachineType machine_type = (i == 0) ? msb_type : MachineType::Uint8();
Node* offset = IntPtrConstant(relative_offset + msb_offset + i * kStep);
@@ -342,7 +439,8 @@ compiler::Node* InterpreterAssembler::BytecodeOperandReadUnaligned(
return result;
}
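On targets without unaligned loads, BytecodeOperandReadUnalignedUnpoisoned assembles an operand one byte at a time: the most significant byte is loaded with the signed or unsigned type of the operand, and the remaining bytes are shifted in below it. A simplified sketch of that combination step (buffer handling and endianness selection are reduced to the big-endian-order case for illustration):

// Sketch: reconstruct a 32-bit signed operand from individual byte loads.
#include <cstdint>
#include <cstdio>

int32_t ReadUnalignedInt32(const uint8_t* operand_start) {
  // Most significant byte first, sign-extended so the operand keeps its sign.
  int32_t result = static_cast<int8_t>(operand_start[0]);
  for (int i = 1; i < 4; i++) {
    result = static_cast<int32_t>((static_cast<uint32_t>(result) << 8) |
                                  operand_start[i]);
  }
  return result;
}

int main() {
  // 0xFFFFFF85 in byte order of descending significance, e.g. a negative jump offset.
  const uint8_t bytes[] = {0xFF, 0xFF, 0xFF, 0x85};
  std::printf("%d\n", ReadUnalignedInt32(bytes));  // prints -123
  return 0;
}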
-Node* InterpreterAssembler::BytecodeOperandUnsignedShort(int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandUnsignedShortUnpoisoned(
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(
OperandSize::kShort,
@@ -353,11 +451,13 @@ Node* InterpreterAssembler::BytecodeOperandUnsignedShort(int operand_index) {
return Load(MachineType::Uint16(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
} else {
- return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint16());
+ return BytecodeOperandReadUnalignedUnpoisoned(operand_offset,
+ MachineType::Uint16());
}
}
-Node* InterpreterAssembler::BytecodeOperandSignedShort(int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandSignedShortUnpoisoned(
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(
OperandSize::kShort,
@@ -368,11 +468,13 @@ Node* InterpreterAssembler::BytecodeOperandSignedShort(int operand_index) {
return Load(MachineType::Int16(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
} else {
- return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16());
+ return BytecodeOperandReadUnalignedUnpoisoned(operand_offset,
+ MachineType::Int16());
}
}
-Node* InterpreterAssembler::BytecodeOperandUnsignedQuad(int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandUnsignedQuadUnpoisoned(
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
@@ -382,11 +484,13 @@ Node* InterpreterAssembler::BytecodeOperandUnsignedQuad(int operand_index) {
return Load(MachineType::Uint32(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
} else {
- return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint32());
+ return BytecodeOperandReadUnalignedUnpoisoned(operand_offset,
+ MachineType::Uint32());
}
}
-Node* InterpreterAssembler::BytecodeOperandSignedQuad(int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandSignedQuadUnpoisoned(
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
@@ -396,44 +500,57 @@ Node* InterpreterAssembler::BytecodeOperandSignedQuad(int operand_index) {
return Load(MachineType::Int32(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
} else {
- return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32());
+ return BytecodeOperandReadUnalignedUnpoisoned(operand_offset,
+ MachineType::Int32());
}
}
-Node* InterpreterAssembler::BytecodeSignedOperand(int operand_index,
- OperandSize operand_size) {
+Node* InterpreterAssembler::BytecodeSignedOperandUnpoisoned(
+ int operand_index, OperandSize operand_size) {
DCHECK(!Bytecodes::IsUnsignedOperandType(
Bytecodes::GetOperandType(bytecode_, operand_index)));
switch (operand_size) {
case OperandSize::kByte:
- return BytecodeOperandSignedByte(operand_index);
+ return BytecodeOperandSignedByteUnpoisoned(operand_index);
case OperandSize::kShort:
- return BytecodeOperandSignedShort(operand_index);
+ return BytecodeOperandSignedShortUnpoisoned(operand_index);
case OperandSize::kQuad:
- return BytecodeOperandSignedQuad(operand_index);
+ return BytecodeOperandSignedQuadUnpoisoned(operand_index);
case OperandSize::kNone:
UNREACHABLE();
}
return nullptr;
}
-Node* InterpreterAssembler::BytecodeUnsignedOperand(int operand_index,
- OperandSize operand_size) {
+Node* InterpreterAssembler::BytecodeUnsignedOperandUnpoisoned(
+ int operand_index, OperandSize operand_size) {
DCHECK(Bytecodes::IsUnsignedOperandType(
Bytecodes::GetOperandType(bytecode_, operand_index)));
switch (operand_size) {
case OperandSize::kByte:
- return BytecodeOperandUnsignedByte(operand_index);
+ return BytecodeOperandUnsignedByteUnpoisoned(operand_index);
case OperandSize::kShort:
- return BytecodeOperandUnsignedShort(operand_index);
+ return BytecodeOperandUnsignedShortUnpoisoned(operand_index);
case OperandSize::kQuad:
- return BytecodeOperandUnsignedQuad(operand_index);
+ return BytecodeOperandUnsignedQuadUnpoisoned(operand_index);
case OperandSize::kNone:
UNREACHABLE();
}
return nullptr;
}
+Node* InterpreterAssembler::BytecodeSignedOperand(int operand_index,
+ OperandSize operand_size) {
+ return PoisonOnSpeculationInt32(
+ BytecodeSignedOperandUnpoisoned(operand_index, operand_size));
+}
+
+Node* InterpreterAssembler::BytecodeUnsignedOperand(int operand_index,
+ OperandSize operand_size) {
+ return PoisonOnSpeculationInt32(
+ BytecodeUnsignedOperandUnpoisoned(operand_index, operand_size));
+}
+
Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
DCHECK_EQ(OperandType::kRegCount,
Bytecodes::GetOperandType(bytecode_, operand_index));
@@ -464,7 +581,7 @@ Node* InterpreterAssembler::BytecodeOperandUImmWord(int operand_index) {
}
Node* InterpreterAssembler::BytecodeOperandUImmSmi(int operand_index) {
- return SmiFromWord32(BytecodeOperandUImm(operand_index));
+ return SmiFromInt32(BytecodeOperandUImm(operand_index));
}
Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
@@ -480,7 +597,7 @@ Node* InterpreterAssembler::BytecodeOperandImmIntPtr(int operand_index) {
}
Node* InterpreterAssembler::BytecodeOperandImmSmi(int operand_index) {
- return SmiFromWord32(BytecodeOperandImm(operand_index));
+ return SmiFromInt32(BytecodeOperandImm(operand_index));
}
Node* InterpreterAssembler::BytecodeOperandIdxInt32(int operand_index) {
@@ -499,13 +616,23 @@ Node* InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) {
return SmiTag(BytecodeOperandIdx(operand_index));
}
-Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandConstantPoolIdxUnpoisoned(
+ int operand_index) {
+ DCHECK_EQ(OperandType::kIdx,
+ Bytecodes::GetOperandType(bytecode_, operand_index));
+ OperandSize operand_size =
+ Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+ return ChangeUint32ToWord(
+ BytecodeUnsignedOperand(operand_index, operand_size));
+}
+
+Node* InterpreterAssembler::BytecodeOperandRegUnpoisoned(int operand_index) {
DCHECK(Bytecodes::IsRegisterOperandType(
Bytecodes::GetOperandType(bytecode_, operand_index)));
OperandSize operand_size =
Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
return ChangeInt32ToIntPtr(
- BytecodeSignedOperand(operand_index, operand_size));
+ BytecodeSignedOperandUnpoisoned(operand_index, operand_size));
}
Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) {
@@ -539,18 +666,27 @@ Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) {
Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
BytecodeArray::kConstantPoolOffset);
- return LoadFixedArrayElement(constant_pool, index);
+ return PoisonOnSpeculationTagged(LoadFixedArrayElement(constant_pool, index));
}
Node* InterpreterAssembler::LoadAndUntagConstantPoolEntry(Node* index) {
return SmiUntag(LoadConstantPoolEntry(index));
}
+Node* InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex(
+ int operand_index) {
+ Node* index = BytecodeOperandConstantPoolIdxUnpoisoned(operand_index);
+ return LoadConstantPoolEntry(index);
+}
+
+Node* InterpreterAssembler::LoadAndUntagConstantPoolEntryAtOperandIndex(
+ int operand_index) {
+ return SmiUntag(LoadConstantPoolEntryAtOperandIndex(operand_index));
+}
+
Node* InterpreterAssembler::LoadFeedbackVector() {
Node* function = LoadRegister(Register::function_closure());
- Node* cell = LoadObjectField(function, JSFunction::kFeedbackVectorOffset);
- Node* vector = LoadObjectField(cell, Cell::kValueOffset);
- return vector;
+ return CodeStubAssembler::LoadFeedbackVector(function);
}
void InterpreterAssembler::CallPrologue() {
@@ -586,11 +722,11 @@ void InterpreterAssembler::IncrementCallCount(Node* feedback_vector,
Comment("increment call count");
Node* call_count =
LoadFeedbackVectorSlot(feedback_vector, slot_id, kPointerSize);
- // The lowest {CallICNexus::CallCountField::kShift} bits of the call
+ // The lowest {FeedbackNexus::CallCountField::kShift} bits of the call
// count are used as flags. To increment the call count by 1 we hence
- // have to increment by 1 << {CallICNexus::CallCountField::kShift}.
- Node* new_count =
- SmiAdd(call_count, SmiConstant(1 << CallICNexus::CallCountField::kShift));
+ // have to increment by 1 << {FeedbackNexus::CallCountField::kShift}.
+ Node* new_count = SmiAdd(
+ call_count, SmiConstant(1 << FeedbackNexus::CallCountField::kShift));
// Count is Smi, so we don't need a write barrier.
StoreFeedbackVectorSlot(feedback_vector, slot_id, new_count,
SKIP_WRITE_BARRIER, kPointerSize);
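The comment above explains why the increment is 1 << FeedbackNexus::CallCountField::kShift: the low kShift bits of the feedback slot are flag bits, so the real count occupies the upper bits. A worked example of that bit-field arithmetic (the shift width below is a made-up value for illustration, not the actual field layout):

// Sketch: incrementing a counter that shares its word with low flag bits.
#include <cstdint>
#include <cstdio>

constexpr uint32_t kCallCountShift = 1;                    // hypothetical flag-bit width
constexpr uint32_t kFlagMask = (1u << kCallCountShift) - 1;

uint32_t IncrementCallCount(uint32_t slot_value) {
  return slot_value + (1u << kCallCountShift);             // low flag bits are preserved
}

int main() {
  uint32_t slot = (5u << kCallCountShift) | 1u;            // count == 5, one flag set
  slot = IncrementCallCount(slot);
  std::printf("count=%u flags=%u\n", slot >> kCallCountShift, slot & kFlagMask);
  return 0;                                                // prints count=6 flags=1
}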
@@ -707,18 +843,30 @@ void InterpreterAssembler::CollectCallFeedback(Node* target, Node* context,
}
void InterpreterAssembler::CallJSAndDispatch(
- Node* function, Node* context, Node* first_arg, Node* arg_count,
+ Node* function, Node* context, const RegListNodePair& args,
ConvertReceiverMode receiver_mode) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
bytecode_ == Bytecode::kInvokeIntrinsic);
DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
+
+ Node* args_count;
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ // The receiver is implied, so it is not in the argument list.
+ args_count = args.reg_count();
+ } else {
+ // Subtract the receiver from the argument count.
+ Node* receiver_count = Int32Constant(1);
+ args_count = Int32Sub(args.reg_count(), receiver_count);
+ }
+
Callable callable = CodeFactory::InterpreterPushArgsThenCall(
isolate(), receiver_mode, InterpreterPushArgsMode::kOther);
Node* code_target = HeapConstant(callable.code());
TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
- arg_count, first_arg, function);
+ args_count, args.base_reg_location(),
+ function);
// TailCallStubThenDispatch updates accumulator with result.
accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
}
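CallJSAndDispatch now takes a RegListNodePair, i.e. a base register location plus a register count, and derives the JS-level argument count from it: with an explicit receiver the receiver occupies the first slot and must be subtracted, while for kNullOrUndefined it is implied and the count is used as-is. A sketch of that bookkeeping with plain values instead of CSA nodes:

// Sketch: register-list pair and receiver-aware argument count.
#include <cstdio>

enum class ConvertReceiverMode { kNullOrUndefined, kAny };

struct RegListPair {
  int base_reg_location;  // stand-in for the pointer into the register file
  int reg_count;          // number of registers in the list
};

int ArgsCountForCall(const RegListPair& args, ConvertReceiverMode mode) {
  return mode == ConvertReceiverMode::kNullOrUndefined ? args.reg_count
                                                       : args.reg_count - 1;
}

int main() {
  RegListPair args{/*base_reg_location=*/-4, /*reg_count=*/3};
  std::printf("implicit receiver: %d args\n",
              ArgsCountForCall(args, ConvertReceiverMode::kNullOrUndefined));
  std::printf("explicit receiver: %d args\n",
              ArgsCountForCall(args, ConvertReceiverMode::kAny));
  return 0;
}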
@@ -764,8 +912,8 @@ template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
ConvertReceiverMode receiver_mode, Node*, Node*, Node*);
void InterpreterAssembler::CallJSWithSpreadAndDispatch(
- Node* function, Node* context, Node* first_arg, Node* arg_count,
- Node* slot_id, Node* feedback_vector) {
+ Node* function, Node* context, const RegListNodePair& args, Node* slot_id,
+ Node* feedback_vector) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), ConvertReceiverMode::kAny);
CollectCallFeedback(function, context, feedback_vector, slot_id);
@@ -775,16 +923,19 @@ void InterpreterAssembler::CallJSWithSpreadAndDispatch(
InterpreterPushArgsMode::kWithFinalSpread);
Node* code_target = HeapConstant(callable.code());
+ Node* receiver_count = Int32Constant(1);
+ Node* args_count = Int32Sub(args.reg_count(), receiver_count);
TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
- arg_count, first_arg, function);
+ args_count, args.base_reg_location(),
+ function);
// TailCallStubThenDispatch updates accumulator with result.
accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
}
Node* InterpreterAssembler::Construct(Node* target, Node* context,
- Node* new_target, Node* first_arg,
- Node* arg_count, Node* slot_id,
- Node* feedback_vector) {
+ Node* new_target,
+ const RegListNodePair& args,
+ Node* slot_id, Node* feedback_vector) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
VARIABLE(var_result, MachineRepresentation::kTagged);
VARIABLE(var_site, MachineRepresentation::kTagged);
@@ -937,8 +1088,8 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
isolate(), InterpreterPushArgsMode::kJSFunction);
Node* code_target = HeapConstant(callable.code());
var_result.Bind(CallStub(callable.descriptor(), code_target, context,
- arg_count, new_target, target, var_site.value(),
- first_arg));
+ args.reg_count(), new_target, target,
+ var_site.value(), args.base_reg_location()));
Goto(&return_result);
}
@@ -950,8 +1101,8 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
isolate(), InterpreterPushArgsMode::kOther);
Node* code_target = HeapConstant(callable.code());
var_result.Bind(CallStub(callable.descriptor(), code_target, context,
- arg_count, new_target, target, UndefinedConstant(),
- first_arg));
+ args.reg_count(), new_target, target,
+ UndefinedConstant(), args.base_reg_location()));
Goto(&return_result);
}
@@ -961,8 +1112,8 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
Node* new_target,
- Node* first_arg,
- Node* arg_count, Node* slot_id,
+ const RegListNodePair& args,
+ Node* slot_id,
Node* feedback_vector) {
// TODO(bmeurer): Unify this with the Construct bytecode feedback
// above once we have a way to pass the AllocationSite to the Array
@@ -1075,12 +1226,13 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
isolate(), InterpreterPushArgsMode::kWithFinalSpread);
Node* code_target = HeapConstant(callable.code());
- return CallStub(callable.descriptor(), code_target, context, arg_count,
- new_target, target, UndefinedConstant(), first_arg);
+ return CallStub(callable.descriptor(), code_target, context, args.reg_count(),
+ new_target, target, UndefinedConstant(),
+ args.base_reg_location());
}
Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
- Node* first_arg, Node* arg_count,
+ const RegListNodePair& args,
int result_size) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
DCHECK(Bytecodes::IsCallRuntime(bytecode_));
@@ -1099,7 +1251,7 @@ Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
IntPtrConstant(offsetof(Runtime::Function, entry)));
return CallStubR(callable.descriptor(), result_size, code_target, context,
- arg_count, first_arg, function_entry);
+ args.reg_count(), args.base_reg_location(), function_entry);
}
void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
@@ -1132,7 +1284,7 @@ void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
BIND(&interrupt_check);
{
CallRuntime(Runtime::kInterrupt, GetContext());
- new_budget.Bind(Int32Constant(Interpreter::kInterruptBudget));
+ new_budget.Bind(Int32Constant(Interpreter::InterruptBudget()));
Goto(&ok);
}
@@ -1169,7 +1321,7 @@ Node* InterpreterAssembler::Advance(Node* delta, bool backward) {
Node* InterpreterAssembler::Jump(Node* delta, bool backward) {
DCHECK(!Bytecodes::IsStarLookahead(bytecode_, operand_scale_));
- UpdateInterruptBudget(TruncateWordToWord32(delta), backward);
+ UpdateInterruptBudget(TruncateIntPtrToInt32(delta), backward);
Node* new_bytecode_offset = Advance(delta, backward);
Node* target_bytecode = LoadBytecode(new_bytecode_offset);
return DispatchToBytecode(target_bytecode, new_bytecode_offset);
@@ -1200,7 +1352,7 @@ void InterpreterAssembler::JumpIfWordNotEqual(Node* lhs, Node* rhs,
JumpConditional(WordNotEqual(lhs, rhs), delta);
}
-Node* InterpreterAssembler::LoadBytecode(compiler::Node* bytecode_offset) {
+Node* InterpreterAssembler::LoadBytecode(Node* bytecode_offset) {
Node* bytecode =
Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(), bytecode_offset);
return ChangeUint32ToWord(bytecode);
@@ -1236,7 +1388,7 @@ void InterpreterAssembler::InlineStar() {
#ifdef V8_TRACE_IGNITION
TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
#endif
- StoreRegister(GetAccumulator(), BytecodeOperandReg(0));
+ StoreRegister(GetAccumulator(), BytecodeOperandRegUnpoisoned(0));
DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
@@ -1267,24 +1419,29 @@ Node* InterpreterAssembler::DispatchToBytecode(Node* target_bytecode,
Load(MachineType::Pointer(), DispatchTableRawPointer(),
TimesPointerSize(target_bytecode));
- return DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset);
+ return DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset,
+ target_bytecode);
}
Node* InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
- Node* bytecode_offset) {
+ Node* bytecode_offset,
+ Node* target_bytecode) {
// TODO(ishell): Add CSA::CodeEntryPoint(code).
Node* handler_entry =
IntPtrAdd(BitcastTaggedToWord(handler),
IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
- return DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset);
+ return DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset,
+ target_bytecode);
}
Node* InterpreterAssembler::DispatchToBytecodeHandlerEntry(
- Node* handler_entry, Node* bytecode_offset) {
+ Node* handler_entry, Node* bytecode_offset, Node* target_bytecode) {
InterpreterDispatchDescriptor descriptor(isolate());
+ // Propagate speculation poisoning.
+ Node* poisoned_handler_entry = PoisonOnSpeculationWord(handler_entry);
return TailCallBytecodeDispatch(
- descriptor, handler_entry, GetAccumulatorUnchecked(), bytecode_offset,
- BytecodeArrayTaggedPointer(), DispatchTableRawPointer());
+ descriptor, poisoned_handler_entry, GetAccumulatorUnchecked(),
+ bytecode_offset, BytecodeArrayTaggedPointer(), DispatchTableRawPointer());
}
void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
@@ -1319,7 +1476,8 @@ void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
Load(MachineType::Pointer(), DispatchTableRawPointer(),
TimesPointerSize(target_index));
- DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset);
+ DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset,
+ next_bytecode);
}
void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
@@ -1342,7 +1500,7 @@ void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
// of the first bytecode.
const int kFirstBytecodeOffset = BytecodeArray::kHeaderSize - kHeapObjectTag;
- Node* profiling_weight = Int32Sub(TruncateWordToWord32(BytecodeOffset()),
+ Node* profiling_weight = Int32Sub(TruncateIntPtrToInt32(BytecodeOffset()),
Int32Constant(kFirstBytecodeOffset));
UpdateInterruptBudget(profiling_weight, true);
}
@@ -1451,9 +1609,12 @@ void InterpreterAssembler::AbortIfRegisterCountInvalid(Node* register_file,
BIND(&ok);
}
-Node* InterpreterAssembler::ExportRegisterFile(Node* array,
- Node* register_count) {
+Node* InterpreterAssembler::ExportRegisterFile(
+ Node* array, const RegListNodePair& registers) {
+ Node* register_count = ChangeUint32ToWord(registers.reg_count());
if (FLAG_debug_code) {
+ CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
+ RegisterLocation(Register(0))));
AbortIfRegisterCountInvalid(array, register_count);
}
@@ -1483,9 +1644,12 @@ Node* InterpreterAssembler::ExportRegisterFile(Node* array,
return array;
}
-Node* InterpreterAssembler::ImportRegisterFile(Node* array,
- Node* register_count) {
+Node* InterpreterAssembler::ImportRegisterFile(
+ Node* array, const RegListNodePair& registers) {
+ Node* register_count = ChangeUint32ToWord(registers.reg_count());
if (FLAG_debug_code) {
+ CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
+ RegisterLocation(Register(0))));
AbortIfRegisterCountInvalid(array, register_count);
}
@@ -1587,8 +1751,7 @@ void InterpreterAssembler::DeserializeLazyAndDispatch() {
Node* target_handler =
CallRuntime(Runtime::kInterpreterDeserializeLazy, context,
SmiTag(bytecode), SmiConstant(operand_scale()));
-
- DispatchToBytecodeHandler(target_handler, bytecode_offset);
+ DispatchToBytecodeHandler(target_handler, bytecode_offset, bytecode);
}
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index 63d1709145..cb622d0b2d 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -56,9 +56,6 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Returns the smi immediate for bytecode operand |operand_index| in the
// current bytecode.
compiler::Node* BytecodeOperandImmSmi(int operand_index);
- // Returns the word-size sign-extended register index for bytecode operand
- // |operand_index| in the current bytecode.
- compiler::Node* BytecodeOperandReg(int operand_index);
// Returns the 32-bit unsigned runtime id immediate for bytecode operand
// |operand_index| in the current bytecode.
compiler::Node* BytecodeOperandRuntimeId(int operand_index);
@@ -86,31 +83,58 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
void GotoIfHasContextExtensionUpToDepth(compiler::Node* context,
compiler::Node* depth, Label* target);
+ // A RegListNodePair provides an abstraction over lists of registers.
+ class RegListNodePair {
+ public:
+ RegListNodePair(Node* base_reg_location, Node* reg_count)
+ : base_reg_location_(base_reg_location), reg_count_(reg_count) {}
+
+ compiler::Node* reg_count() const { return reg_count_; }
+ compiler::Node* base_reg_location() const { return base_reg_location_; }
+
+ private:
+ compiler::Node* base_reg_location_;
+ compiler::Node* reg_count_;
+ };
+
// Backup/restore register file to/from a fixed array of the correct length.
compiler::Node* ExportRegisterFile(compiler::Node* array,
- compiler::Node* register_count);
+ const RegListNodePair& registers);
compiler::Node* ImportRegisterFile(compiler::Node* array,
- compiler::Node* register_count);
+ const RegListNodePair& registers);
// Loads from and stores to the interpreter register file.
compiler::Node* LoadRegister(Register reg);
- compiler::Node* LoadRegister(compiler::Node* reg_index);
compiler::Node* LoadAndUntagRegister(Register reg);
- compiler::Node* StoreRegister(compiler::Node* value, Register reg);
- compiler::Node* StoreRegister(compiler::Node* value,
- compiler::Node* reg_index);
- compiler::Node* StoreAndTagRegister(compiler::Node* value, Register reg);
-
- // Returns the next consecutive register.
- compiler::Node* NextRegister(compiler::Node* reg_index);
-
- // Returns the location in memory of the register |reg_index| in the
- // interpreter register file.
- compiler::Node* RegisterLocation(compiler::Node* reg_index);
-
+ compiler::Node* LoadRegisterAtOperandIndex(int operand_index);
+ std::pair<compiler::Node*, compiler::Node*> LoadRegisterPairAtOperandIndex(
+ int operand_index);
+ void StoreRegister(compiler::Node* value, Register reg);
+ void StoreAndTagRegister(compiler::Node* value, Register reg);
+ void StoreRegisterAtOperandIndex(compiler::Node* value, int operand_index);
+ void StoreRegisterPairAtOperandIndex(compiler::Node* value1,
+ compiler::Node* value2,
+ int operand_index);
+ void StoreRegisterTripleAtOperandIndex(compiler::Node* value1,
+ compiler::Node* value2,
+ compiler::Node* value3,
+ int operand_index);
+
+ RegListNodePair GetRegisterListAtOperandIndex(int operand_index);
+ Node* LoadRegisterFromRegisterList(const RegListNodePair& reg_list,
+ int index);
+ Node* RegisterLocationInRegisterList(const RegListNodePair& reg_list,
+ int index);
+
+ // Load constant at the index specified in operand |operand_index| from the
+ // constant pool.
+ compiler::Node* LoadConstantPoolEntryAtOperandIndex(int operand_index);
+ // Load and untag constant at the index specified in operand |operand_index|
+ // from the constant pool.
+ compiler::Node* LoadAndUntagConstantPoolEntryAtOperandIndex(
+ int operand_index);
// Load constant at |index| in the constant pool.
compiler::Node* LoadConstantPoolEntry(compiler::Node* index);
-
// Load and untag constant at |index| in the constant pool.
compiler::Node* LoadAndUntagConstantPoolEntry(compiler::Node* index);
@@ -135,12 +159,11 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
compiler::Node* feedback_vector,
compiler::Node* slot_id);
- // Call JSFunction or Callable |function| with |arg_count| arguments (not
- // including receiver) and the first argument located at |first_arg|, possibly
+ // Call JSFunction or Callable |function| with |args| arguments, possibly
// including the receiver depending on |receiver_mode|. After the call returns
// directly dispatches to the next bytecode.
void CallJSAndDispatch(compiler::Node* function, compiler::Node* context,
- compiler::Node* first_arg, compiler::Node* arg_count,
+ const RegListNodePair& args,
ConvertReceiverMode receiver_mode);
// Call JSFunction or Callable |function| with |arg_count| arguments (not
@@ -151,46 +174,41 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
void CallJSAndDispatch(Node* function, Node* context, Node* arg_count,
ConvertReceiverMode receiver_mode, TArgs... args);
- // Call JSFunction or Callable |function| with |arg_count|
- // arguments (not including receiver) and the first argument
- // located at |first_arg|, and the final argument being spread. After the call
- // returns directly dispatches to the next bytecode.
+ // Call JSFunction or Callable |function| with |args|
+ // arguments (not including receiver), and the final argument being spread.
+ // After the call returns directly dispatches to the next bytecode.
void CallJSWithSpreadAndDispatch(compiler::Node* function,
compiler::Node* context,
- compiler::Node* first_arg,
- compiler::Node* arg_count,
+ const RegListNodePair& args,
compiler::Node* slot_id,
compiler::Node* feedback_vector);
- // Call constructor |target| with |arg_count| arguments (not
- // including receiver) and the first argument located at
- // |first_arg|. The |new_target| is the same as the
- // |target| for the new keyword, but differs for the super
- // keyword.
+ // Call constructor |target| with |args| arguments (not including receiver).
+ // The |new_target| is the same as the |target| for the new keyword, but
+ // differs for the super keyword.
compiler::Node* Construct(compiler::Node* target, compiler::Node* context,
compiler::Node* new_target,
- compiler::Node* first_arg,
- compiler::Node* arg_count, compiler::Node* slot_id,
+ const RegListNodePair& args,
+ compiler::Node* slot_id,
compiler::Node* feedback_vector);
- // Call constructor |target| with |arg_count| arguments (not including
- // receiver) and the first argument located at |first_arg|. The last argument
- // is always a spread. The |new_target| is the same as the |target| for
- // the new keyword, but differs for the super keyword.
+ // Call constructor |target| with |args| arguments (not including
+ // receiver). The last argument is always a spread. The |new_target| is the
+ // same as the |target| for the new keyword, but differs for the super
+ // keyword.
compiler::Node* ConstructWithSpread(compiler::Node* target,
compiler::Node* context,
compiler::Node* new_target,
- compiler::Node* first_arg,
- compiler::Node* arg_count,
+ const RegListNodePair& args,
compiler::Node* slot_id,
compiler::Node* feedback_vector);
- // Call runtime function with |arg_count| arguments and the first argument
- // located at |first_arg|.
+ // Call runtime function with |args| arguments, which will return
+ // |return_size| values.
compiler::Node* CallRuntimeN(compiler::Node* function_id,
compiler::Node* context,
- compiler::Node* first_arg,
- compiler::Node* arg_count, int return_size = 1);
+ const RegListNodePair& args,
+ int return_size = 1);
// Jump forward relative to the current bytecode by the |jump_offset|.
compiler::Node* Jump(compiler::Node* jump_offset);
@@ -217,14 +235,14 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Dispatch to the bytecode.
compiler::Node* Dispatch();
- // Dispatch to bytecode handler.
- compiler::Node* DispatchToBytecodeHandler(compiler::Node* handler) {
- return DispatchToBytecodeHandler(handler, BytecodeOffset());
- }
-
// Dispatch bytecode as wide operand variant.
void DispatchWide(OperandScale operand_scale);
+ // Dispatch to |target_bytecode| at |new_bytecode_offset|.
+ // |target_bytecode| should be equivalent to loading from the offset.
+ compiler::Node* DispatchToBytecode(compiler::Node* target_bytecode,
+ compiler::Node* new_bytecode_offset);
+
// Abort with the given abort reason.
void Abort(AbortReason abort_reason);
void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
@@ -264,6 +282,18 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// interpreted.
compiler::Node* GetInterpretedFramePointer();
+ // Operations on registers.
+ compiler::Node* RegisterLocation(Register reg);
+ compiler::Node* RegisterLocation(compiler::Node* reg_index);
+ compiler::Node* NextRegister(compiler::Node* reg_index);
+ compiler::Node* LoadRegister(Node* reg_index);
+ void StoreRegister(compiler::Node* value, compiler::Node* reg_index);
+
+ // Poison |value| on speculative paths.
+ compiler::Node* PoisonOnSpeculationTagged(Node* value);
+ compiler::Node* PoisonOnSpeculationWord(Node* value);
+ compiler::Node* PoisonOnSpeculationInt32(Node* value);
+
// Saves and restores interpreter bytecode offset to the interpreter stack
// frame when performing a call.
void CallPrologue();
@@ -291,16 +321,21 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// The |result_type| determines the size and signedness of the
// value read. This method should only be used on architectures that
// do not support unaligned memory accesses.
- compiler::Node* BytecodeOperandReadUnaligned(int relative_offset,
- MachineType result_type);
-
- // Returns zero- or sign-extended to word32 value of the operand.
- compiler::Node* BytecodeOperandUnsignedByte(int operand_index);
- compiler::Node* BytecodeOperandSignedByte(int operand_index);
- compiler::Node* BytecodeOperandUnsignedShort(int operand_index);
- compiler::Node* BytecodeOperandSignedShort(int operand_index);
- compiler::Node* BytecodeOperandUnsignedQuad(int operand_index);
- compiler::Node* BytecodeOperandSignedQuad(int operand_index);
+ compiler::Node* BytecodeOperandReadUnalignedUnpoisoned(
+ int relative_offset, MachineType result_type);
+
+ // Returns zero- or sign-extended to word32 value of the operand. Values are
+ // not poisoned on speculation, so they should be used with care.
+ compiler::Node* BytecodeOperandUnsignedByteUnpoisoned(int operand_index);
+ compiler::Node* BytecodeOperandSignedByteUnpoisoned(int operand_index);
+ compiler::Node* BytecodeOperandUnsignedShortUnpoisoned(int operand_index);
+ compiler::Node* BytecodeOperandSignedShortUnpoisoned(int operand_index);
+ compiler::Node* BytecodeOperandUnsignedQuadUnpoisoned(int operand_index);
+ compiler::Node* BytecodeOperandSignedQuadUnpoisoned(int operand_index);
+ compiler::Node* BytecodeSignedOperandUnpoisoned(int operand_index,
+ OperandSize operand_size);
+ compiler::Node* BytecodeUnsignedOperandUnpoisoned(int operand_index,
+ OperandSize operand_size);
// Returns zero- or sign-extended to word32 value of the operand of
// given size.
@@ -309,6 +344,15 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
compiler::Node* BytecodeUnsignedOperand(int operand_index,
OperandSize operand_size);
+ // Returns the word-size sign-extended register index for bytecode operand
+ // |operand_index| in the current bytecode. Value is not poisoned on
+ // speculation since the value loaded from the register is poisoned instead.
+ compiler::Node* BytecodeOperandRegUnpoisoned(int operand_index);
+
+ // Returns the word zero-extended index immediate for bytecode operand
+ // |operand_index| in the current bytecode for use when loading a constant pool entry.
+ compiler::Node* BytecodeOperandConstantPoolIdxUnpoisoned(int operand_index);
+
// Jump relative to the current bytecode by the |jump_offset|. If |backward|,
// then jump backward (subtract the offset), otherwise jump forward (add the
// offset). Helper function for Jump and JumpBackward.
@@ -344,18 +388,15 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// next dispatch offset.
void InlineStar();
- // Dispatch to |target_bytecode| at |new_bytecode_offset|.
- // |target_bytecode| should be equivalent to loading from the offset.
- compiler::Node* DispatchToBytecode(compiler::Node* target_bytecode,
- compiler::Node* new_bytecode_offset);
-
// Dispatch to the bytecode handler with code offset |handler|.
compiler::Node* DispatchToBytecodeHandler(compiler::Node* handler,
- compiler::Node* bytecode_offset);
+ compiler::Node* bytecode_offset,
+ compiler::Node* target_bytecode);
// Dispatch to the bytecode handler with code entry point |handler_entry|.
compiler::Node* DispatchToBytecodeHandlerEntry(
- compiler::Node* handler_entry, compiler::Node* bytecode_offset);
+ compiler::Node* handler_entry, compiler::Node* bytecode_offset,
+ compiler::Node* target_bytecode);
int CurrentBytecodeSize() const;
@@ -373,6 +414,8 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
bool reloaded_frame_ptr_;
bool bytecode_array_valid_;
+ Node* speculation_poison_;
+
bool disable_stack_check_across_call_;
compiler::Node* stack_pointer_before_call_;
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index 5dabc13ea0..65af249ea7 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -73,8 +73,7 @@ IGNITION_HANDLER(LdaSmi, InterpreterAssembler) {
//
// Load constant literal at |idx| in the constant pool into the accumulator.
IGNITION_HANDLER(LdaConstant, InterpreterAssembler) {
- Node* index = BytecodeOperandIdx(0);
- Node* constant = LoadConstantPoolEntry(index);
+ Node* constant = LoadConstantPoolEntryAtOperandIndex(0);
SetAccumulator(constant);
Dispatch();
}
@@ -123,8 +122,7 @@ IGNITION_HANDLER(LdaFalse, InterpreterAssembler) {
//
// Load accumulator with value from register <src>.
IGNITION_HANDLER(Ldar, InterpreterAssembler) {
- Node* reg_index = BytecodeOperandReg(0);
- Node* value = LoadRegister(reg_index);
+ Node* value = LoadRegisterAtOperandIndex(0);
SetAccumulator(value);
Dispatch();
}
@@ -133,9 +131,8 @@ IGNITION_HANDLER(Ldar, InterpreterAssembler) {
//
// Store accumulator to register <dst>.
IGNITION_HANDLER(Star, InterpreterAssembler) {
- Node* reg_index = BytecodeOperandReg(0);
Node* accumulator = GetAccumulator();
- StoreRegister(accumulator, reg_index);
+ StoreRegisterAtOperandIndex(accumulator, 0);
Dispatch();
}
@@ -143,10 +140,8 @@ IGNITION_HANDLER(Star, InterpreterAssembler) {
//
// Stores the value of register <src> to register <dst>.
IGNITION_HANDLER(Mov, InterpreterAssembler) {
- Node* src_index = BytecodeOperandReg(0);
- Node* src_value = LoadRegister(src_index);
- Node* dst_index = BytecodeOperandReg(1);
- StoreRegister(src_value, dst_index);
+ Node* src_value = LoadRegisterAtOperandIndex(0);
+ StoreRegisterAtOperandIndex(src_value, 1);
Dispatch();
}
@@ -169,8 +164,7 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
LazyNode<Context> lazy_context = [=] { return CAST(GetContext()); };
LazyNode<Name> lazy_name = [=] {
- Node* name_index = BytecodeOperandIdx(name_operand_index);
- Node* name = LoadConstantPoolEntry(name_index);
+ Node* name = LoadConstantPoolEntryAtOperandIndex(name_operand_index);
return CAST(name);
};
@@ -214,8 +208,7 @@ IGNITION_HANDLER(StaGlobal, InterpreterAssembler) {
Node* context = GetContext();
// Store the global via the StoreGlobalIC.
- Node* constant_index = BytecodeOperandIdx(0);
- Node* name = LoadConstantPoolEntry(constant_index);
+ Node* name = LoadConstantPoolEntryAtOperandIndex(0);
Node* value = GetAccumulator();
Node* raw_slot = BytecodeOperandIdx(1);
Node* smi_slot = SmiTag(raw_slot);
@@ -230,8 +223,7 @@ IGNITION_HANDLER(StaGlobal, InterpreterAssembler) {
// Load the object in |slot_index| of the context at |depth| in the context
// chain starting at |context| into the accumulator.
IGNITION_HANDLER(LdaContextSlot, InterpreterAssembler) {
- Node* reg_index = BytecodeOperandReg(0);
- Node* context = LoadRegister(reg_index);
+ Node* context = LoadRegisterAtOperandIndex(0);
Node* slot_index = BytecodeOperandIdx(1);
Node* depth = BytecodeOperandUImm(2);
Node* slot_context = GetContextAtDepth(context, depth);
@@ -245,8 +237,7 @@ IGNITION_HANDLER(LdaContextSlot, InterpreterAssembler) {
// Load the object in |slot_index| of the context at |depth| in the context
// chain starting at |context| into the accumulator.
IGNITION_HANDLER(LdaImmutableContextSlot, InterpreterAssembler) {
- Node* reg_index = BytecodeOperandReg(0);
- Node* context = LoadRegister(reg_index);
+ Node* context = LoadRegisterAtOperandIndex(0);
Node* slot_index = BytecodeOperandIdx(1);
Node* depth = BytecodeOperandUImm(2);
Node* slot_context = GetContextAtDepth(context, depth);
@@ -283,8 +274,7 @@ IGNITION_HANDLER(LdaImmutableCurrentContextSlot, InterpreterAssembler) {
// |depth| in the context chain starting at |context|.
IGNITION_HANDLER(StaContextSlot, InterpreterAssembler) {
Node* value = GetAccumulator();
- Node* reg_index = BytecodeOperandReg(0);
- Node* context = LoadRegister(reg_index);
+ Node* context = LoadRegisterAtOperandIndex(0);
Node* slot_index = BytecodeOperandIdx(1);
Node* depth = BytecodeOperandUImm(2);
Node* slot_context = GetContextAtDepth(context, depth);
@@ -309,8 +299,7 @@ IGNITION_HANDLER(StaCurrentContextSlot, InterpreterAssembler) {
// Lookup the object with the name in constant pool entry |name_index|
// dynamically.
IGNITION_HANDLER(LdaLookupSlot, InterpreterAssembler) {
- Node* name_index = BytecodeOperandIdx(0);
- Node* name = LoadConstantPoolEntry(name_index);
+ Node* name = LoadConstantPoolEntryAtOperandIndex(0);
Node* context = GetContext();
Node* result = CallRuntime(Runtime::kLoadLookupSlot, context, name);
SetAccumulator(result);
@@ -322,8 +311,7 @@ IGNITION_HANDLER(LdaLookupSlot, InterpreterAssembler) {
// Lookup the object with the name in constant pool entry |name_index|
// dynamically without causing a NoReferenceError.
IGNITION_HANDLER(LdaLookupSlotInsideTypeof, InterpreterAssembler) {
- Node* name_index = BytecodeOperandIdx(0);
- Node* name = LoadConstantPoolEntry(name_index);
+ Node* name = LoadConstantPoolEntryAtOperandIndex(0);
Node* context = GetContext();
Node* result =
CallRuntime(Runtime::kLoadLookupSlotInsideTypeof, context, name);
@@ -340,7 +328,6 @@ class InterpreterLookupContextSlotAssembler : public InterpreterAssembler {
void LookupContextSlot(Runtime::FunctionId function_id) {
Node* context = GetContext();
- Node* name_index = BytecodeOperandIdx(0);
Node* slot_index = BytecodeOperandIdx(1);
Node* depth = BytecodeOperandUImm(2);
@@ -360,7 +347,7 @@ class InterpreterLookupContextSlotAssembler : public InterpreterAssembler {
// Slow path when we have to call out to the runtime.
BIND(&slowpath);
{
- Node* name = LoadConstantPoolEntry(name_index);
+ Node* name = LoadConstantPoolEntryAtOperandIndex(0);
Node* result = CallRuntime(function_id, context, name);
SetAccumulator(result);
Dispatch();
@@ -416,8 +403,7 @@ class InterpreterLookupGlobalAssembler : public InterpreterLoadGlobalAssembler {
// Slow path when we have to call out to the runtime
BIND(&slowpath);
{
- Node* name_index = BytecodeOperandIdx(0);
- Node* name = LoadConstantPoolEntry(name_index);
+ Node* name = LoadConstantPoolEntryAtOperandIndex(0);
Node* result = CallRuntime(function_id, context, name);
SetAccumulator(result);
Dispatch();
@@ -448,9 +434,8 @@ IGNITION_HANDLER(LdaLookupGlobalSlotInsideTypeof,
// pool entry |name_index|.
IGNITION_HANDLER(StaLookupSlot, InterpreterAssembler) {
Node* value = GetAccumulator();
- Node* index = BytecodeOperandIdx(0);
+ Node* name = LoadConstantPoolEntryAtOperandIndex(0);
Node* bytecode_flags = BytecodeOperandFlag(1);
- Node* name = LoadConstantPoolEntry(index);
Node* context = GetContext();
Variable var_result(this, MachineRepresentation::kTagged);
@@ -510,14 +495,11 @@ IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) {
Node* smi_slot = SmiTag(feedback_slot);
// Load receiver.
- Node* register_index = BytecodeOperandReg(0);
- Node* recv = LoadRegister(register_index);
+ Node* recv = LoadRegisterAtOperandIndex(0);
// Load the name.
// TODO(jgruber): Not needed for monomorphic smi handler constant/field case.
- Node* constant_index = BytecodeOperandIdx(1);
- Node* name = LoadConstantPoolEntry(constant_index);
-
+ Node* name = LoadConstantPoolEntryAtOperandIndex(1);
Node* context = GetContext();
Label done(this);
@@ -543,8 +525,7 @@ IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) {
IGNITION_HANDLER(LdaKeyedProperty, InterpreterAssembler) {
Callable ic = Builtins::CallableFor(isolate(), Builtins::kKeyedLoadIC);
Node* code_target = HeapConstant(ic.code());
- Node* reg_index = BytecodeOperandReg(0);
- Node* object = LoadRegister(reg_index);
+ Node* object = LoadRegisterAtOperandIndex(0);
Node* name = GetAccumulator();
Node* raw_slot = BytecodeOperandIdx(1);
Node* smi_slot = SmiTag(raw_slot);
@@ -565,10 +546,8 @@ class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler {
void StaNamedProperty(Callable ic) {
Node* code_target = HeapConstant(ic.code());
- Node* object_reg_index = BytecodeOperandReg(0);
- Node* object = LoadRegister(object_reg_index);
- Node* constant_index = BytecodeOperandIdx(1);
- Node* name = LoadConstantPoolEntry(constant_index);
+ Node* object = LoadRegisterAtOperandIndex(0);
+ Node* name = LoadConstantPoolEntryAtOperandIndex(1);
Node* value = GetAccumulator();
Node* raw_slot = BytecodeOperandIdx(2);
Node* smi_slot = SmiTag(raw_slot);
@@ -611,10 +590,8 @@ IGNITION_HANDLER(StaNamedOwnProperty, InterpreterStoreNamedPropertyAssembler) {
IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) {
Callable ic = Builtins::CallableFor(isolate(), Builtins::kKeyedStoreIC);
Node* code_target = HeapConstant(ic.code());
- Node* object_reg_index = BytecodeOperandReg(0);
- Node* object = LoadRegister(object_reg_index);
- Node* name_reg_index = BytecodeOperandReg(1);
- Node* name = LoadRegister(name_reg_index);
+ Node* object = LoadRegisterAtOperandIndex(0);
+ Node* name = LoadRegisterAtOperandIndex(1);
Node* value = GetAccumulator();
Node* raw_slot = BytecodeOperandIdx(2);
Node* smi_slot = SmiTag(raw_slot);
@@ -638,10 +615,10 @@ IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) {
// This definition is not observable and is used only for definitions
// in object or class literals.
IGNITION_HANDLER(StaDataPropertyInLiteral, InterpreterAssembler) {
- Node* object = LoadRegister(BytecodeOperandReg(0));
- Node* name = LoadRegister(BytecodeOperandReg(1));
+ Node* object = LoadRegisterAtOperandIndex(0);
+ Node* name = LoadRegisterAtOperandIndex(1);
Node* value = GetAccumulator();
- Node* flags = SmiFromWord32(BytecodeOperandFlag(2));
+ Node* flags = SmiFromInt32(BytecodeOperandFlag(2));
Node* vector_index = SmiTag(BytecodeOperandIdx(3));
Node* feedback_vector = LoadFeedbackVector();
@@ -749,10 +726,9 @@ IGNITION_HANDLER(StaModuleVariable, InterpreterAssembler) {
// Saves the current context in <context>, and pushes the accumulator as the
// new current context.
IGNITION_HANDLER(PushContext, InterpreterAssembler) {
- Node* reg_index = BytecodeOperandReg(0);
Node* new_context = GetAccumulator();
Node* old_context = GetContext();
- StoreRegister(old_context, reg_index);
+ StoreRegisterAtOperandIndex(old_context, 0);
SetContext(new_context);
Dispatch();
}
@@ -761,8 +737,7 @@ IGNITION_HANDLER(PushContext, InterpreterAssembler) {
//
// Pops the current context and sets <context> as the new context.
IGNITION_HANDLER(PopContext, InterpreterAssembler) {
- Node* reg_index = BytecodeOperandReg(0);
- Node* context = LoadRegister(reg_index);
+ Node* context = LoadRegisterAtOperandIndex(0);
SetContext(context);
Dispatch();
}
@@ -780,8 +755,7 @@ class InterpreterBinaryOpAssembler : public InterpreterAssembler {
bool lhs_is_smi);
void BinaryOpWithFeedback(BinaryOpGenerator generator) {
- Node* reg_index = BytecodeOperandReg(0);
- Node* lhs = LoadRegister(reg_index);
+ Node* lhs = LoadRegisterAtOperandIndex(0);
Node* rhs = GetAccumulator();
Node* context = GetContext();
Node* slot_index = BytecodeOperandIdx(1);
@@ -902,8 +876,7 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
: InterpreterAssembler(state, bytecode, operand_scale) {}
void BitwiseBinaryOpWithFeedback(Operation bitwise_op) {
- Node* reg_index = BytecodeOperandReg(0);
- Node* left = LoadRegister(reg_index);
+ Node* left = LoadRegisterAtOperandIndex(0);
Node* right = GetAccumulator();
Node* context = GetContext();
Node* slot_index = BytecodeOperandIdx(1);
@@ -969,7 +942,7 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
&var_left_bigint, &var_left_feedback);
BIND(&do_smi_op);
Node* result =
- BitwiseOp(var_left_word32.value(), SmiToWord32(right), bitwise_op);
+ BitwiseOp(var_left_word32.value(), SmiToInt32(right), bitwise_op);
Node* result_type = SelectSmiConstant(TaggedIsSmi(result),
BinaryOperationFeedback::kSignedSmall,
BinaryOperationFeedback::kNumber);
@@ -1279,7 +1252,7 @@ IGNITION_HANDLER(ToName, InterpreterAssembler) {
Node* object = GetAccumulator();
Node* context = GetContext();
Node* result = ToName(context, object);
- StoreRegister(result, BytecodeOperandReg(0));
+ StoreRegisterAtOperandIndex(result, 0);
Dispatch();
}
@@ -1306,7 +1279,7 @@ IGNITION_HANDLER(ToObject, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
Node* context = GetContext();
Node* result = CallStub(callable.descriptor(), target, context, accumulator);
- StoreRegister(result, BytecodeOperandReg(0));
+ StoreRegisterAtOperandIndex(result, 0);
Dispatch();
}
@@ -1449,8 +1422,7 @@ IGNITION_HANDLER(TypeOf, InterpreterAssembler) {
// Delete the property specified in the accumulator from the object
// referenced by the register operand following strict mode semantics.
IGNITION_HANDLER(DeletePropertyStrict, InterpreterAssembler) {
- Node* reg_index = BytecodeOperandReg(0);
- Node* object = LoadRegister(reg_index);
+ Node* object = LoadRegisterAtOperandIndex(0);
Node* key = GetAccumulator();
Node* context = GetContext();
Node* result = CallBuiltin(Builtins::kDeleteProperty, context, object, key,
@@ -1464,8 +1436,7 @@ IGNITION_HANDLER(DeletePropertyStrict, InterpreterAssembler) {
// Delete the property specified in the accumulator from the object
// referenced by the register operand following sloppy mode semantics.
IGNITION_HANDLER(DeletePropertySloppy, InterpreterAssembler) {
- Node* reg_index = BytecodeOperandReg(0);
- Node* object = LoadRegister(reg_index);
+ Node* object = LoadRegisterAtOperandIndex(0);
Node* key = GetAccumulator();
Node* context = GetContext();
Node* result = CallBuiltin(Builtins::kDeleteProperty, context, object, key,
@@ -1482,8 +1453,7 @@ IGNITION_HANDLER(GetSuperConstructor, InterpreterAssembler) {
Node* active_function = GetAccumulator();
Node* context = GetContext();
Node* result = GetSuperConstructor(active_function, context);
- Node* reg = BytecodeOperandReg(0);
- StoreRegister(result, reg);
+ StoreRegisterAtOperandIndex(result, 0);
Dispatch();
}
@@ -1495,20 +1465,8 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
// Generates code to perform a JS call that collects type feedback.
void JSCall(ConvertReceiverMode receiver_mode) {
- Node* function_reg = BytecodeOperandReg(0);
- Node* function = LoadRegister(function_reg);
- Node* first_arg_reg = BytecodeOperandReg(1);
- Node* first_arg = RegisterLocation(first_arg_reg);
- Node* arg_list_count = BytecodeOperandCount(2);
- Node* args_count;
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- // The receiver is implied, so it is not in the argument list.
- args_count = arg_list_count;
- } else {
- // Subtract the receiver from the argument count.
- Node* receiver_count = Int32Constant(1);
- args_count = Int32Sub(arg_list_count, receiver_count);
- }
+ Node* function = LoadRegisterAtOperandIndex(0);
+ RegListNodePair args = GetRegisterListAtOperandIndex(1);
Node* slot_id = BytecodeOperandIdx(3);
Node* feedback_vector = LoadFeedbackVector();
Node* context = GetContext();
@@ -1517,7 +1475,7 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
CollectCallFeedback(function, context, feedback_vector, slot_id);
// Call the function and dispatch to the next handler.
- CallJSAndDispatch(function, context, first_arg, args_count, receiver_mode);
+ CallJSAndDispatch(function, context, args, receiver_mode);
}
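
For calls, the separate first-argument register and argument-count nodes are likewise replaced by a single RegListNodePair returned from GetRegisterListAtOperandIndex, and the explicit receiver subtraction disappears from the handler body; where that adjustment now happens (inside the list helper or inside the RegListNodePair-aware CallJSAndDispatch overload) is not visible in this hunk. A minimal sketch of the pair idea, with toy integer fields standing in for the Node* values:

  // Toy stand-in for InterpreterAssembler::RegListNodePair: the first register
  // of a contiguous argument window plus the number of registers it spans.
  struct ToyRegList {
    int base_reg;
    int reg_count;
  };

  // What the old JSCall handler did inline when the receiver was part of the
  // window: keep the window base (the receiver register) but pass on a count
  // that excludes the implicit receiver.
  ToyRegList DropImplicitReceiverFromCount(ToyRegList args) {
    return ToyRegList{args.base_reg, args.reg_count - 1};
  }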
// Generates code to perform a JS call with a known number of arguments that
@@ -1531,8 +1489,7 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
const int kSlotOperandIndex =
kFirstArgumentOperandIndex + kRecieverAndArgOperandCount;
- Node* function_reg = BytecodeOperandReg(0);
- Node* function = LoadRegister(function_reg);
+ Node* function = LoadRegisterAtOperandIndex(0);
Node* slot_id = BytecodeOperandIdx(kSlotOperandIndex);
Node* feedback_vector = LoadFeedbackVector();
Node* context = GetContext();
@@ -1548,20 +1505,20 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
case 1:
CallJSAndDispatch(
function, context, Int32Constant(arg_count), receiver_mode,
- LoadRegister(BytecodeOperandReg(kFirstArgumentOperandIndex)));
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex));
break;
case 2:
CallJSAndDispatch(
function, context, Int32Constant(arg_count), receiver_mode,
- LoadRegister(BytecodeOperandReg(kFirstArgumentOperandIndex)),
- LoadRegister(BytecodeOperandReg(kFirstArgumentOperandIndex + 1)));
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex),
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1));
break;
case 3:
CallJSAndDispatch(
function, context, Int32Constant(arg_count), receiver_mode,
- LoadRegister(BytecodeOperandReg(kFirstArgumentOperandIndex)),
- LoadRegister(BytecodeOperandReg(kFirstArgumentOperandIndex + 1)),
- LoadRegister(BytecodeOperandReg(kFirstArgumentOperandIndex + 2)));
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex),
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1),
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 2));
break;
default:
UNREACHABLE();
@@ -1617,11 +1574,9 @@ IGNITION_HANDLER(CallUndefinedReceiver2, InterpreterJSCallAssembler) {
// registers.
IGNITION_HANDLER(CallRuntime, InterpreterAssembler) {
Node* function_id = BytecodeOperandRuntimeId(0);
- Node* first_arg_reg = BytecodeOperandReg(1);
- Node* first_arg = RegisterLocation(first_arg_reg);
- Node* args_count = BytecodeOperandCount(2);
+ RegListNodePair args = GetRegisterListAtOperandIndex(1);
Node* context = GetContext();
- Node* result = CallRuntimeN(function_id, context, first_arg, args_count);
+ Node* result = CallRuntimeN(function_id, context, args);
SetAccumulator(result);
Dispatch();
}
@@ -1633,11 +1588,9 @@ IGNITION_HANDLER(CallRuntime, InterpreterAssembler) {
// arguments in subsequent registers.
IGNITION_HANDLER(InvokeIntrinsic, InterpreterAssembler) {
Node* function_id = BytecodeOperandIntrinsicId(0);
- Node* first_arg_reg = BytecodeOperandReg(1);
- Node* arg_count = BytecodeOperandCount(2);
+ RegListNodePair args = GetRegisterListAtOperandIndex(1);
Node* context = GetContext();
- Node* result = GenerateInvokeIntrinsic(this, function_id, context,
- first_arg_reg, arg_count);
+ Node* result = GenerateInvokeIntrinsic(this, function_id, context, args);
SetAccumulator(result);
Dispatch();
}
@@ -1651,19 +1604,13 @@ IGNITION_HANDLER(InvokeIntrinsic, InterpreterAssembler) {
IGNITION_HANDLER(CallRuntimeForPair, InterpreterAssembler) {
// Call the runtime function.
Node* function_id = BytecodeOperandRuntimeId(0);
- Node* first_arg_reg = BytecodeOperandReg(1);
- Node* first_arg = RegisterLocation(first_arg_reg);
- Node* args_count = BytecodeOperandCount(2);
+ RegListNodePair args = GetRegisterListAtOperandIndex(1);
Node* context = GetContext();
- Node* result_pair =
- CallRuntimeN(function_id, context, first_arg, args_count, 2);
+ Node* result_pair = CallRuntimeN(function_id, context, args, 2);
// Store the results in <first_return> and <first_return + 1>
- Node* first_return_reg = BytecodeOperandReg(3);
- Node* second_return_reg = NextRegister(first_return_reg);
Node* result0 = Projection(0, result_pair);
Node* result1 = Projection(1, result_pair);
- StoreRegister(result0, first_return_reg);
- StoreRegister(result1, second_return_reg);
+ StoreRegisterPairAtOperandIndex(result0, result1, 3);
Dispatch();
}
@@ -1673,9 +1620,7 @@ IGNITION_HANDLER(CallRuntimeForPair, InterpreterAssembler) {
// in register |receiver| and |arg_count| arguments in subsequent registers.
IGNITION_HANDLER(CallJSRuntime, InterpreterAssembler) {
Node* context_index = BytecodeOperandNativeContextIndex(0);
- Node* receiver_reg = BytecodeOperandReg(1);
- Node* first_arg = RegisterLocation(receiver_reg);
- Node* args_count = BytecodeOperandCount(2);
+ RegListNodePair args = GetRegisterListAtOperandIndex(1);
// Get the function to call from the native context.
Node* context = GetContext();
@@ -1683,7 +1628,7 @@ IGNITION_HANDLER(CallJSRuntime, InterpreterAssembler) {
Node* function = LoadContextElement(native_context, context_index);
// Call the function.
- CallJSAndDispatch(function, context, first_arg, args_count,
+ CallJSAndDispatch(function, context, args,
ConvertReceiverMode::kNullOrUndefined);
}
@@ -1694,20 +1639,15 @@ IGNITION_HANDLER(CallJSRuntime, InterpreterAssembler) {
// final argument is always a spread.
//
IGNITION_HANDLER(CallWithSpread, InterpreterAssembler) {
- Node* callable_reg = BytecodeOperandReg(0);
- Node* callable = LoadRegister(callable_reg);
- Node* receiver_reg = BytecodeOperandReg(1);
- Node* receiver_arg = RegisterLocation(receiver_reg);
- Node* receiver_args_count = BytecodeOperandCount(2);
- Node* receiver_count = Int32Constant(1);
- Node* args_count = Int32Sub(receiver_args_count, receiver_count);
+ Node* callable = LoadRegisterAtOperandIndex(0);
+ RegListNodePair args = GetRegisterListAtOperandIndex(1);
Node* slot_id = BytecodeOperandIdx(3);
Node* feedback_vector = LoadFeedbackVector();
Node* context = GetContext();
// Call into Runtime function CallWithSpread which does everything.
- CallJSWithSpreadAndDispatch(callable, context, receiver_arg, args_count,
- slot_id, feedback_vector);
+ CallJSWithSpreadAndDispatch(callable, context, args, slot_id,
+ feedback_vector);
}
// ConstructWithSpread <first_arg> <arg_count>
@@ -1718,17 +1658,13 @@ IGNITION_HANDLER(CallWithSpread, InterpreterAssembler) {
//
IGNITION_HANDLER(ConstructWithSpread, InterpreterAssembler) {
Node* new_target = GetAccumulator();
- Node* constructor_reg = BytecodeOperandReg(0);
- Node* constructor = LoadRegister(constructor_reg);
- Node* first_arg_reg = BytecodeOperandReg(1);
- Node* first_arg = RegisterLocation(first_arg_reg);
- Node* args_count = BytecodeOperandCount(2);
+ Node* constructor = LoadRegisterAtOperandIndex(0);
+ RegListNodePair args = GetRegisterListAtOperandIndex(1);
Node* slot_id = BytecodeOperandIdx(3);
Node* feedback_vector = LoadFeedbackVector();
Node* context = GetContext();
- Node* result =
- ConstructWithSpread(constructor, context, new_target, first_arg,
- args_count, slot_id, feedback_vector);
+ Node* result = ConstructWithSpread(constructor, context, new_target, args,
+ slot_id, feedback_vector);
SetAccumulator(result);
Dispatch();
}
@@ -1741,16 +1677,13 @@ IGNITION_HANDLER(ConstructWithSpread, InterpreterAssembler) {
//
IGNITION_HANDLER(Construct, InterpreterAssembler) {
Node* new_target = GetAccumulator();
- Node* constructor_reg = BytecodeOperandReg(0);
- Node* constructor = LoadRegister(constructor_reg);
- Node* first_arg_reg = BytecodeOperandReg(1);
- Node* first_arg = RegisterLocation(first_arg_reg);
- Node* args_count = BytecodeOperandCount(2);
+ Node* constructor = LoadRegisterAtOperandIndex(0);
+ RegListNodePair args = GetRegisterListAtOperandIndex(1);
Node* slot_id = BytecodeOperandIdx(3);
Node* feedback_vector = LoadFeedbackVector();
Node* context = GetContext();
- Node* result = Construct(constructor, context, new_target, first_arg,
- args_count, slot_id, feedback_vector);
+ Node* result = Construct(constructor, context, new_target, args, slot_id,
+ feedback_vector);
SetAccumulator(result);
Dispatch();
}
@@ -1762,8 +1695,7 @@ class InterpreterCompareOpAssembler : public InterpreterAssembler {
: InterpreterAssembler(state, bytecode, operand_scale) {}
void CompareOpWithFeedback(Operation compare_op) {
- Node* reg_index = BytecodeOperandReg(0);
- Node* lhs = LoadRegister(reg_index);
+ Node* lhs = LoadRegisterAtOperandIndex(0);
Node* rhs = GetAccumulator();
Node* context = GetContext();
@@ -1844,8 +1776,7 @@ IGNITION_HANDLER(TestGreaterThanOrEqual, InterpreterCompareOpAssembler) {
// Test if the value in the <src> register is strictly equal to the accumulator.
// Type feedback is not collected.
IGNITION_HANDLER(TestEqualStrictNoFeedback, InterpreterAssembler) {
- Node* reg_index = BytecodeOperandReg(0);
- Node* lhs = LoadRegister(reg_index);
+ Node* lhs = LoadRegisterAtOperandIndex(0);
Node* rhs = GetAccumulator();
// TODO(5310): This is called only when lhs and rhs are Smis (for ex:
// try-finally or generators) or strings (only when visiting
@@ -1861,8 +1792,7 @@ IGNITION_HANDLER(TestEqualStrictNoFeedback, InterpreterAssembler) {
// Test if the object referenced by the register operand is a property of the
// object referenced by the accumulator.
IGNITION_HANDLER(TestIn, InterpreterAssembler) {
- Node* reg_index = BytecodeOperandReg(0);
- Node* property = LoadRegister(reg_index);
+ Node* property = LoadRegisterAtOperandIndex(0);
Node* object = GetAccumulator();
Node* context = GetContext();
@@ -1875,8 +1805,7 @@ IGNITION_HANDLER(TestIn, InterpreterAssembler) {
// Test if the object referenced by the <src> register is an instance of type
// referenced by the accumulator.
IGNITION_HANDLER(TestInstanceOf, InterpreterAssembler) {
- Node* object_reg = BytecodeOperandReg(0);
- Node* object = LoadRegister(object_reg);
+ Node* object = LoadRegisterAtOperandIndex(0);
Node* callable = GetAccumulator();
Node* slot_id = BytecodeOperandIdx(1);
Node* feedback_vector = LoadFeedbackVector();
@@ -2063,8 +1992,7 @@ IGNITION_HANDLER(Jump, InterpreterAssembler) {
// Jump by the number of bytes in the Smi in the |idx| entry in the constant
// pool.
IGNITION_HANDLER(JumpConstant, InterpreterAssembler) {
- Node* index = BytecodeOperandIdx(0);
- Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
+ Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
Jump(relative_jump);
}
@@ -2088,8 +2016,7 @@ IGNITION_HANDLER(JumpIfTrue, InterpreterAssembler) {
// and will misbehave if passed arbitrary input values.
IGNITION_HANDLER(JumpIfTrueConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
- Node* index = BytecodeOperandIdx(0);
- Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
+ Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
CSA_ASSERT(this, TaggedIsNotSmi(accumulator));
CSA_ASSERT(this, IsBoolean(accumulator));
JumpIfWordEqual(accumulator, TrueConstant(), relative_jump);
@@ -2115,8 +2042,7 @@ IGNITION_HANDLER(JumpIfFalse, InterpreterAssembler) {
// and will misbehave if passed arbitrary input values.
IGNITION_HANDLER(JumpIfFalseConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
- Node* index = BytecodeOperandIdx(0);
- Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
+ Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
CSA_ASSERT(this, TaggedIsNotSmi(accumulator));
CSA_ASSERT(this, IsBoolean(accumulator));
JumpIfWordEqual(accumulator, FalseConstant(), relative_jump);
@@ -2144,8 +2070,7 @@ IGNITION_HANDLER(JumpIfToBooleanTrue, InterpreterAssembler) {
// cast to boolean.
IGNITION_HANDLER(JumpIfToBooleanTrueConstant, InterpreterAssembler) {
Node* value = GetAccumulator();
- Node* index = BytecodeOperandIdx(0);
- Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
+ Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
Label if_true(this), if_false(this);
BranchIfToBooleanIsTrue(value, &if_true, &if_false);
BIND(&if_true);
@@ -2176,8 +2101,7 @@ IGNITION_HANDLER(JumpIfToBooleanFalse, InterpreterAssembler) {
// cast to boolean.
IGNITION_HANDLER(JumpIfToBooleanFalseConstant, InterpreterAssembler) {
Node* value = GetAccumulator();
- Node* index = BytecodeOperandIdx(0);
- Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
+ Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
Label if_true(this), if_false(this);
BranchIfToBooleanIsTrue(value, &if_true, &if_false);
BIND(&if_true);
@@ -2202,8 +2126,7 @@ IGNITION_HANDLER(JumpIfNull, InterpreterAssembler) {
// pool if the object referenced by the accumulator is the null constant.
IGNITION_HANDLER(JumpIfNullConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
- Node* index = BytecodeOperandIdx(0);
- Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
+ Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
JumpIfWordEqual(accumulator, NullConstant(), relative_jump);
}
@@ -2223,8 +2146,7 @@ IGNITION_HANDLER(JumpIfNotNull, InterpreterAssembler) {
// pool if the object referenced by the accumulator is not the null constant.
IGNITION_HANDLER(JumpIfNotNullConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
- Node* index = BytecodeOperandIdx(0);
- Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
+ Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
JumpIfWordNotEqual(accumulator, NullConstant(), relative_jump);
}
@@ -2244,8 +2166,7 @@ IGNITION_HANDLER(JumpIfUndefined, InterpreterAssembler) {
// pool if the object referenced by the accumulator is the undefined constant.
IGNITION_HANDLER(JumpIfUndefinedConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
- Node* index = BytecodeOperandIdx(0);
- Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
+ Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
JumpIfWordEqual(accumulator, UndefinedConstant(), relative_jump);
}
@@ -2266,8 +2187,7 @@ IGNITION_HANDLER(JumpIfNotUndefined, InterpreterAssembler) {
// constant.
IGNITION_HANDLER(JumpIfNotUndefinedConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
- Node* index = BytecodeOperandIdx(0);
- Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
+ Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
JumpIfWordNotEqual(accumulator, UndefinedConstant(), relative_jump);
}
@@ -2297,8 +2217,7 @@ IGNITION_HANDLER(JumpIfJSReceiver, InterpreterAssembler) {
// pool if the object referenced by the accumulator is a JSReceiver.
IGNITION_HANDLER(JumpIfJSReceiverConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
- Node* index = BytecodeOperandIdx(0);
- Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
+ Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
Label if_object(this), if_notobject(this), if_notsmi(this);
Branch(TaggedIsSmi(accumulator), &if_notobject, &if_notsmi);
@@ -2378,11 +2297,10 @@ IGNITION_HANDLER(SwitchOnSmiNoFeedback, InterpreterAssembler) {
// Creates a regular expression literal for literal index <literal_idx> with
// <flags> and the pattern in <pattern_idx>.
IGNITION_HANDLER(CreateRegExpLiteral, InterpreterAssembler) {
- Node* pattern_index = BytecodeOperandIdx(0);
- Node* pattern = LoadConstantPoolEntry(pattern_index);
+ Node* pattern = LoadConstantPoolEntryAtOperandIndex(0);
Node* feedback_vector = LoadFeedbackVector();
Node* slot_id = BytecodeOperandIdx(1);
- Node* flags = SmiFromWord32(BytecodeOperandFlag(2));
+ Node* flags = SmiFromInt32(BytecodeOperandFlag(2));
Node* context = GetContext();
ConstructorBuiltinsAssembler constructor_assembler(state());
Node* result = constructor_assembler.EmitCreateRegExpLiteral(
@@ -2421,8 +2339,7 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
Node* flags_raw = DecodeWordFromWord32<CreateArrayLiteralFlags::FlagsBits>(
bytecode_flags);
Node* flags = SmiTag(flags_raw);
- Node* index = BytecodeOperandIdx(0);
- Node* constant_elements = LoadConstantPoolEntry(index);
+ Node* constant_elements = LoadConstantPoolEntryAtOperandIndex(0);
Node* result =
CallRuntime(Runtime::kCreateArrayLiteral, context, feedback_vector,
SmiTag(slot_id), constant_elements, flags);
@@ -2466,15 +2383,14 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
ConstructorBuiltinsAssembler constructor_assembler(state());
Node* result = constructor_assembler.EmitCreateShallowObjectLiteral(
feedback_vector, slot_id, &if_not_fast_clone);
- StoreRegister(result, BytecodeOperandReg(3));
+ StoreRegisterAtOperandIndex(result, 3);
Dispatch();
}
BIND(&if_not_fast_clone);
{
// If we can't do a fast clone, call into the runtime.
- Node* index = BytecodeOperandIdx(0);
- Node* boilerplate_description = LoadConstantPoolEntry(index);
+ Node* boilerplate_description = LoadConstantPoolEntryAtOperandIndex(0);
Node* context = GetContext();
Node* flags_raw = DecodeWordFromWord32<CreateObjectLiteralFlags::FlagsBits>(
@@ -2484,7 +2400,7 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
Node* result =
CallRuntime(Runtime::kCreateObjectLiteral, context, feedback_vector,
SmiTag(slot_id), boilerplate_description, flags);
- StoreRegister(result, BytecodeOperandReg(3));
+ StoreRegisterAtOperandIndex(result, 3);
// TODO(klaasb) build a single dispatch once the call is inlined
Dispatch();
}
@@ -2501,19 +2417,34 @@ IGNITION_HANDLER(CreateEmptyObjectLiteral, InterpreterAssembler) {
Dispatch();
}
-// GetTemplateObject
+// GetTemplateObject <descriptor_idx> <literal_idx>
//
// Creates the template to pass for tagged templates and returns it in the
// accumulator, creating and caching the site object on-demand as per the
// specification.
IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) {
- Node* description_index = BytecodeOperandIdx(0);
- Node* description = LoadConstantPoolEntry(description_index);
- Node* context = GetContext();
+ Node* feedback_vector = LoadFeedbackVector();
+ Node* slot = BytecodeOperandIdx(1);
+ Node* cached_value =
+ LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS);
- Node* result = CallRuntime(Runtime::kGetTemplateObject, context, description);
- SetAccumulator(result);
+ Label call_runtime(this, Label::kDeferred);
+ GotoIf(WordEqual(cached_value, SmiConstant(0)), &call_runtime);
+
+ SetAccumulator(cached_value);
Dispatch();
+
+ BIND(&call_runtime);
+ {
+ Node* description = LoadConstantPoolEntryAtOperandIndex(0);
+ Node* context = GetContext();
+ Node* result =
+ CallRuntime(Runtime::kCreateTemplateObject, context, description);
+ StoreFeedbackVectorSlot(feedback_vector, slot, result, UPDATE_WRITE_BARRIER,
+ 0, INTPTR_PARAMETERS);
+ SetAccumulator(result);
+ Dispatch();
+ }
}
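
The rewritten GetTemplateObject handler makes the template object a per-call-site cached value: the feedback-vector slot named by the second operand starts out as Smi 0, the first execution calls the runtime to create the object and stores it into the slot, and later executions simply reload it, which is what the "caching the site object on-demand as per the specification" comment above refers to. A self-contained sketch of that memoisation shape (toy types; the real handler stores into a FeedbackVector slot with a write barrier):

  #include <functional>
  #include <vector>

  // Toy feedback vector: slot value 0 plays the role of the "uninitialized"
  // Smi sentinel used by the handler above.
  struct ToyFeedbackVector {
    std::vector<long> slots;
  };

  long GetTemplateObject(ToyFeedbackVector& vector, int slot,
                         const std::function<long()>& create_via_runtime) {
    long cached = vector.slots[slot];
    if (cached != 0) return cached;      // fast path: already created
    long result = create_via_runtime();  // slow path: runtime call
    vector.slots[slot] = result;         // cache for subsequent executions
    return result;
  }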
// CreateClosure <index> <slot> <tenured>
@@ -2521,35 +2452,47 @@ IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) {
// Creates a new closure for SharedFunctionInfo at position |index| in the
// constant pool and with the PretenureFlag <tenured>.
IGNITION_HANDLER(CreateClosure, InterpreterAssembler) {
- Node* index = BytecodeOperandIdx(0);
- Node* shared = LoadConstantPoolEntry(index);
+ Node* shared = LoadConstantPoolEntryAtOperandIndex(0);
Node* flags = BytecodeOperandFlag(2);
Node* context = GetContext();
-
- Label call_runtime(this, Label::kDeferred);
- GotoIfNot(IsSetWord32<CreateClosureFlags::FastNewClosureBit>(flags),
- &call_runtime);
- ConstructorBuiltinsAssembler constructor_assembler(state());
- Node* vector_index = BytecodeOperandIdx(1);
- vector_index = SmiTag(vector_index);
+ Node* slot = BytecodeOperandIdx(1);
Node* feedback_vector = LoadFeedbackVector();
- SetAccumulator(constructor_assembler.EmitFastNewClosure(
- shared, feedback_vector, vector_index, context));
- Dispatch();
+ Node* feedback_cell = LoadFeedbackVectorSlot(feedback_vector, slot);
- BIND(&call_runtime);
+ Label if_fast(this), if_slow(this, Label::kDeferred);
+ Branch(IsSetWord32<CreateClosureFlags::FastNewClosureBit>(flags), &if_fast,
+ &if_slow);
+
+ BIND(&if_fast);
{
- Node* tenured_raw =
- DecodeWordFromWord32<CreateClosureFlags::PretenuredBit>(flags);
- Node* tenured = SmiTag(tenured_raw);
- feedback_vector = LoadFeedbackVector();
- vector_index = BytecodeOperandIdx(1);
- vector_index = SmiTag(vector_index);
- Node* result = CallRuntime(Runtime::kInterpreterNewClosure, context, shared,
- feedback_vector, vector_index, tenured);
+ Node* result =
+ CallBuiltin(Builtins::kFastNewClosure, context, shared, feedback_cell);
SetAccumulator(result);
Dispatch();
}
+
+ BIND(&if_slow);
+ {
+ Label if_newspace(this), if_oldspace(this);
+ Branch(IsSetWord32<CreateClosureFlags::PretenuredBit>(flags), &if_oldspace,
+ &if_newspace);
+
+ BIND(&if_newspace);
+ {
+ Node* result =
+ CallRuntime(Runtime::kNewClosure, context, shared, feedback_cell);
+ SetAccumulator(result);
+ Dispatch();
+ }
+
+ BIND(&if_oldspace);
+ {
+ Node* result = CallRuntime(Runtime::kNewClosure_Tenured, context, shared,
+ feedback_cell);
+ SetAccumulator(result);
+ Dispatch();
+ }
+ }
}
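
CreateClosure now chooses between three allocation paths based on the flags operand: a fast Builtins::kFastNewClosure call that receives the feedback cell loaded from the feedback vector slot, and two runtime fallbacks (Runtime::kNewClosure versus Runtime::kNewClosure_Tenured) selected by the pretenure bit. The decision structure, reduced to a toy function with made-up enum names:

  enum class ClosurePath { kFastBuiltin, kRuntimeNewSpace, kRuntimeOldSpace };

  // Mirrors the branch structure of the handler above: the fast-path bit wins,
  // otherwise the pretenure bit chooses between new-space and old-space
  // runtime allocation.
  ClosurePath ChooseClosurePath(bool fast_new_closure_bit, bool pretenured_bit) {
    if (fast_new_closure_bit) return ClosurePath::kFastBuiltin;
    return pretenured_bit ? ClosurePath::kRuntimeOldSpace
                          : ClosurePath::kRuntimeNewSpace;
  }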
// CreateBlockContext <index>
@@ -2557,8 +2500,7 @@ IGNITION_HANDLER(CreateClosure, InterpreterAssembler) {
// Creates a new block context with the scope info constant at |index| and the
// closure in the accumulator.
IGNITION_HANDLER(CreateBlockContext, InterpreterAssembler) {
- Node* index = BytecodeOperandIdx(0);
- Node* scope_info = LoadConstantPoolEntry(index);
+ Node* scope_info = LoadConstantPoolEntryAtOperandIndex(0);
Node* closure = GetAccumulator();
Node* context = GetContext();
SetAccumulator(
@@ -2572,12 +2514,9 @@ IGNITION_HANDLER(CreateBlockContext, InterpreterAssembler) {
// the variable name at |name_idx|, the ScopeInfo at |scope_info_idx|, and the
// closure in the accumulator.
IGNITION_HANDLER(CreateCatchContext, InterpreterAssembler) {
- Node* exception_reg = BytecodeOperandReg(0);
- Node* exception = LoadRegister(exception_reg);
- Node* name_idx = BytecodeOperandIdx(1);
- Node* name = LoadConstantPoolEntry(name_idx);
- Node* scope_info_idx = BytecodeOperandIdx(2);
- Node* scope_info = LoadConstantPoolEntry(scope_info_idx);
+ Node* exception = LoadRegisterAtOperandIndex(0);
+ Node* name = LoadConstantPoolEntryAtOperandIndex(1);
+ Node* scope_info = LoadConstantPoolEntryAtOperandIndex(2);
Node* closure = GetAccumulator();
Node* context = GetContext();
SetAccumulator(CallRuntime(Runtime::kPushCatchContext, context, name,
@@ -2617,10 +2556,8 @@ IGNITION_HANDLER(CreateEvalContext, InterpreterAssembler) {
// with-statement with the object in |register| and the closure in the
// accumulator.
IGNITION_HANDLER(CreateWithContext, InterpreterAssembler) {
- Node* reg_index = BytecodeOperandReg(0);
- Node* object = LoadRegister(reg_index);
- Node* scope_info_idx = BytecodeOperandIdx(1);
- Node* scope_info = LoadConstantPoolEntry(scope_info_idx);
+ Node* object = LoadRegisterAtOperandIndex(0);
+ Node* scope_info = LoadConstantPoolEntryAtOperandIndex(1);
Node* closure = GetAccumulator();
Node* context = GetContext();
SetAccumulator(CallRuntime(Runtime::kPushWithContext, context, object,
@@ -2771,7 +2708,7 @@ IGNITION_HANDLER(ThrowReferenceErrorIfHole, InterpreterAssembler) {
BIND(&throw_error);
{
- Node* name = LoadConstantPoolEntry(BytecodeOperandIdx(0));
+ Node* name = LoadConstantPoolEntryAtOperandIndex(0);
CallRuntime(Runtime::kThrowReferenceError, GetContext(), name);
// We shouldn't ever return from a throw.
Abort(AbortReason::kUnexpectedReturnFromThrow);
@@ -2834,10 +2771,10 @@ IGNITION_HANDLER(Debugger, InterpreterAssembler) {
Node* result_pair = \
CallRuntime(Runtime::kDebugBreakOnBytecode, context, accumulator); \
Node* return_value = Projection(0, result_pair); \
- Node* original_handler = Projection(1, result_pair); \
+ Node* original_bytecode = SmiUntag(Projection(1, result_pair)); \
MaybeDropFrames(context); \
SetAccumulator(return_value); \
- DispatchToBytecodeHandler(original_handler); \
+ DispatchToBytecode(original_bytecode, BytecodeOffset()); \
}
DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
#undef DEBUG_BREAK
@@ -2856,30 +2793,13 @@ IGNITION_HANDLER(IncBlockCounter, InterpreterAssembler) {
Dispatch();
}
-class InterpreterForInPrepareAssembler : public InterpreterAssembler {
- public:
- InterpreterForInPrepareAssembler(CodeAssemblerState* state, Bytecode bytecode,
- OperandScale operand_scale)
- : InterpreterAssembler(state, bytecode, operand_scale) {}
-
- void BuildForInPrepareResult(Node* output_register, Node* cache_type,
- Node* cache_array, Node* cache_length) {
- StoreRegister(cache_type, output_register);
- output_register = NextRegister(output_register);
- StoreRegister(cache_array, output_register);
- output_register = NextRegister(output_register);
- StoreRegister(cache_length, output_register);
- }
-};
-
// ForInEnumerate <receiver>
//
// Enumerates the enumerable keys of the |receiver| and either returns the
// map of the |receiver| if it has a usable enum cache or a fixed array
// with the keys to enumerate in the accumulator.
IGNITION_HANDLER(ForInEnumerate, InterpreterAssembler) {
- Node* receiver_register = BytecodeOperandReg(0);
- Node* receiver = LoadRegister(receiver_register);
+ Node* receiver = LoadRegisterAtOperandIndex(0);
Node* context = GetContext();
Label if_empty(this), if_runtime(this, Label::kDeferred);
@@ -2910,9 +2830,8 @@ IGNITION_HANDLER(ForInEnumerate, InterpreterAssembler) {
// The result is output in registers |cache_info_triple| to
// |cache_info_triple + 2|, with the registers holding cache_type, cache_array,
// and cache_length respectively.
-IGNITION_HANDLER(ForInPrepare, InterpreterForInPrepareAssembler) {
+IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) {
Node* enumerator = GetAccumulator();
- Node* output_register = BytecodeOperandReg(0);
Node* vector_index = BytecodeOperandIdx(1);
Node* feedback_vector = LoadFeedbackVector();
@@ -2946,8 +2865,7 @@ IGNITION_HANDLER(ForInPrepare, InterpreterForInPrepareAssembler) {
Node* cache_type = enumerator;
Node* cache_array = enum_keys;
Node* cache_length = SmiTag(enum_length);
- BuildForInPrepareResult(output_register, cache_type, cache_array,
- cache_length);
+ StoreRegisterTripleAtOperandIndex(cache_type, cache_array, cache_length, 0);
Dispatch();
}
@@ -2964,8 +2882,7 @@ IGNITION_HANDLER(ForInPrepare, InterpreterForInPrepareAssembler) {
Node* cache_type = enumerator;
Node* cache_array = enumerator;
Node* cache_length = LoadFixedArrayBaseLength(enumerator);
- BuildForInPrepareResult(output_register, cache_type, cache_array,
- cache_length);
+ StoreRegisterTripleAtOperandIndex(cache_type, cache_array, cache_length, 0);
Dispatch();
}
}
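
With the dedicated InterpreterForInPrepareAssembler gone, ForInPrepare writes its three outputs through StoreRegisterTripleAtOperandIndex, that is, into the register named by operand 0 and the two registers that follow it. A toy version of what such a helper presumably does (a plain vector of integers instead of the interpreter register file):

  #include <cstdint>
  #include <vector>

  // Toy version of StoreRegisterTripleAtOperandIndex: write cache_type,
  // cache_array and cache_length into three consecutive registers, starting
  // at the register named by the triple operand.
  void StoreRegisterTriple(std::vector<intptr_t>& registers, int base_reg,
                           intptr_t cache_type, intptr_t cache_array,
                           intptr_t cache_length) {
    registers[base_reg] = cache_type;
    registers[base_reg + 1] = cache_array;
    registers[base_reg + 2] = cache_length;
  }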
@@ -2974,14 +2891,11 @@ IGNITION_HANDLER(ForInPrepare, InterpreterForInPrepareAssembler) {
//
// Returns the next enumerable property in the accumulator.
IGNITION_HANDLER(ForInNext, InterpreterAssembler) {
- Node* receiver_reg = BytecodeOperandReg(0);
- Node* receiver = LoadRegister(receiver_reg);
- Node* index_reg = BytecodeOperandReg(1);
- Node* index = LoadRegister(index_reg);
- Node* cache_type_reg = BytecodeOperandReg(2);
- Node* cache_type = LoadRegister(cache_type_reg);
- Node* cache_array_reg = NextRegister(cache_type_reg);
- Node* cache_array = LoadRegister(cache_array_reg);
+ Node* receiver = LoadRegisterAtOperandIndex(0);
+ Node* index = LoadRegisterAtOperandIndex(1);
+ Node* cache_type;
+ Node* cache_array;
+ std::tie(cache_type, cache_array) = LoadRegisterPairAtOperandIndex(2);
Node* vector_index = BytecodeOperandIdx(3);
Node* feedback_vector = LoadFeedbackVector();
@@ -3017,10 +2931,8 @@ IGNITION_HANDLER(ForInNext, InterpreterAssembler) {
//
// Returns false if the end of the enumerable properties has been reached.
IGNITION_HANDLER(ForInContinue, InterpreterAssembler) {
- Node* index_reg = BytecodeOperandReg(0);
- Node* index = LoadRegister(index_reg);
- Node* cache_length_reg = BytecodeOperandReg(1);
- Node* cache_length = LoadRegister(cache_length_reg);
+ Node* index = LoadRegisterAtOperandIndex(0);
+ Node* cache_length = LoadRegisterAtOperandIndex(1);
// Check if {index} is at {cache_length} already.
Label if_true(this), if_false(this), end(this);
@@ -3044,8 +2956,7 @@ IGNITION_HANDLER(ForInContinue, InterpreterAssembler) {
// Increments the loop counter in register |index| and stores the result
// in the accumulator.
IGNITION_HANDLER(ForInStep, InterpreterAssembler) {
- Node* index_reg = BytecodeOperandReg(0);
- Node* index = LoadRegister(index_reg);
+ Node* index = LoadRegisterAtOperandIndex(0);
Node* one = SmiConstant(1);
Node* result = SmiAdd(index, one);
SetAccumulator(result);
@@ -3078,34 +2989,16 @@ IGNITION_HANDLER(Illegal, InterpreterAssembler) {
//
// Exports the register file and stores it into the generator. Also stores the
// current context, |suspend_id|, and the current bytecode offset (for debugging
-// purposes) into the generator.
+// purposes) into the generator. Then, returns the value in the accumulator.
IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) {
- Node* generator_reg = BytecodeOperandReg(0);
-
- Node* generator = LoadRegister(generator_reg);
-
- Label if_stepping(this, Label::kDeferred), ok(this);
- Node* step_action_address = ExternalConstant(
- ExternalReference::debug_last_step_action_address(isolate()));
- Node* step_action = Load(MachineType::Int8(), step_action_address);
- STATIC_ASSERT(StepIn > StepNext);
- STATIC_ASSERT(LastStepAction == StepIn);
- Node* step_next = Int32Constant(StepNext);
- Branch(Int32LessThanOrEqual(step_next, step_action), &if_stepping, &ok);
- BIND(&ok);
-
+ Node* generator = LoadRegisterAtOperandIndex(0);
Node* array =
LoadObjectField(generator, JSGeneratorObject::kRegisterFileOffset);
Node* context = GetContext();
+ RegListNodePair registers = GetRegisterListAtOperandIndex(1);
Node* suspend_id = BytecodeOperandUImmSmi(3);
- // Bytecode operand 1 should be always 0 (we are always store registers
- // from the beginning).
- CSA_ASSERT(this, WordEqual(BytecodeOperandReg(1),
- IntPtrConstant(Register(0).ToOperand())));
- // Bytecode operand 2 is the number of registers to store to the generator.
- Node* register_count = ChangeUint32ToWord(BytecodeOperandCount(2));
- ExportRegisterFile(array, register_count);
+ ExportRegisterFile(array, registers);
StoreObjectField(generator, JSGeneratorObject::kContextOffset, context);
StoreObjectField(generator, JSGeneratorObject::kContinuationOffset,
suspend_id);
@@ -3115,59 +3008,66 @@ IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) {
Node* offset = SmiTag(BytecodeOffset());
StoreObjectField(generator, JSGeneratorObject::kInputOrDebugPosOffset,
offset);
- Dispatch();
- BIND(&if_stepping);
- {
- Node* context = GetContext();
- CallRuntime(Runtime::kDebugRecordGenerator, context, generator);
- Goto(&ok);
- }
+ UpdateInterruptBudgetOnReturn();
+ Return(GetAccumulator());
}
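
SuspendGenerator changes shape in three ways: the debugger stepping check is gone from the handler, the registers to save are described by the register-list operand instead of an assumed-zero start register plus a count, and rather than dispatching to the next bytecode the handler updates the interrupt budget and returns the accumulator, so suspending really is a return from the generator's frame. A toy model of the register export into the generator's persistent register file (hypothetical signature; the real ExportRegisterFile copies CSA values into a FixedArray):

  #include <cstdint>
  #include <vector>

  // Toy model: copy the live interpreter registers described by a register
  // window into the generator's persistent store so they can be restored on
  // resume (compare ExportRegisterFile / ImportRegisterFile above).
  void ExportRegisterFile(std::vector<intptr_t>& generator_store,
                          const std::vector<intptr_t>& frame_registers,
                          int base_reg, int reg_count) {
    generator_store.assign(frame_registers.begin() + base_reg,
                           frame_registers.begin() + base_reg + reg_count);
  }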
-// RestoreGeneratorState <generator>
+// SwitchOnGeneratorState <generator> <table_start> <table_length>
//
-// Loads the generator's state and stores it in the accumulator,
-// before overwriting it with kGeneratorExecuting.
-IGNITION_HANDLER(RestoreGeneratorState, InterpreterAssembler) {
- Node* generator_reg = BytecodeOperandReg(0);
- Node* generator = LoadRegister(generator_reg);
+// If |generator| is undefined, falls through. Otherwise, loads the
+// generator's state (overwriting it with kGeneratorExecuting), sets the context
+// to the generator's resume context, and performs state dispatch on the
+// generator's state by looking up the generator state in a jump table in the
+// constant pool, starting at |table_start|, and of length |table_length|.
+IGNITION_HANDLER(SwitchOnGeneratorState, InterpreterAssembler) {
+ Node* generator = LoadRegisterAtOperandIndex(0);
+
+ Label fallthrough(this);
+ GotoIf(WordEqual(generator, UndefinedConstant()), &fallthrough);
- Node* old_state =
+ Node* state =
LoadObjectField(generator, JSGeneratorObject::kContinuationOffset);
- Node* new_state = Int32Constant(JSGeneratorObject::kGeneratorExecuting);
+ Node* new_state = SmiConstant(JSGeneratorObject::kGeneratorExecuting);
StoreObjectField(generator, JSGeneratorObject::kContinuationOffset,
- SmiTag(new_state));
- SetAccumulator(old_state);
+ new_state);
+
+ Node* context = LoadObjectField(generator, JSGeneratorObject::kContextOffset);
+ SetContext(context);
+
+ Node* table_start = BytecodeOperandIdx(1);
+ // TODO(leszeks): table_length is only used for a CSA_ASSERT, we don't
+ // actually need it otherwise.
+ Node* table_length = BytecodeOperandUImmWord(2);
+
+ // The state must be a Smi.
+ CSA_ASSERT(this, TaggedIsSmi(state));
+ Node* case_value = SmiUntag(state);
+
+ CSA_ASSERT(this, IntPtrGreaterThanOrEqual(case_value, IntPtrConstant(0)));
+ CSA_ASSERT(this, IntPtrLessThan(case_value, table_length));
+ USE(table_length);
+
+ Node* entry = IntPtrAdd(table_start, case_value);
+ Node* relative_jump = LoadAndUntagConstantPoolEntry(entry);
+ Jump(relative_jump);
+
+ BIND(&fallthrough);
Dispatch();
}
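
The new SwitchOnGeneratorState bytecode folds RestoreGeneratorState and the subsequent dispatch into one operation: if the generator register holds undefined it falls through, otherwise it marks the generator as executing, restores the generator's context, and jumps through a constant-pool jump table indexed by the saved state. The table lookup itself reduces to the following toy function (integer offsets standing in for the Smi constant-pool entries):

  #include <vector>

  // Toy jump-table dispatch: starting at table_start, the constant pool holds
  // one relative jump offset per possible generator state; the saved state
  // picks the entry and the caller jumps by that offset.
  int LookUpGeneratorJump(const std::vector<int>& constant_pool,
                          int table_start, int table_length, int state) {
    // The handler asserts 0 <= state < table_length before indexing.
    if (state < 0 || state >= table_length) return -1;  // debug-only guard
    return constant_pool[table_start + state];
  }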
-// ResumeGenerator <generator> <generator_state> <first output
-// register> <register count>
+// ResumeGenerator <generator> <first output register> <register count>
//
// Imports the register file stored in the generator and marks the generator
// state as executing.
IGNITION_HANDLER(ResumeGenerator, InterpreterAssembler) {
- Node* generator_reg = BytecodeOperandReg(0);
- Node* generator_state_reg = BytecodeOperandReg(1);
- // Bytecode operand 2 is the start register. It should always be 0, so let's
- // ignore it.
- CSA_ASSERT(this, WordEqual(BytecodeOperandReg(2),
- IntPtrConstant(Register(0).ToOperand())));
- // Bytecode operand 3 is the number of registers to store to the generator.
- Node* register_count = ChangeUint32ToWord(BytecodeOperandCount(3));
-
- Node* generator = LoadRegister(generator_reg);
+ Node* generator = LoadRegisterAtOperandIndex(0);
+ RegListNodePair registers = GetRegisterListAtOperandIndex(1);
ImportRegisterFile(
LoadObjectField(generator, JSGeneratorObject::kRegisterFileOffset),
- register_count);
-
- // Since we're resuming, update the generator state to indicate that the
- // generator is now executing.
- StoreRegister(SmiConstant(JSGeneratorObject::kGeneratorExecuting),
- generator_state_reg);
+ registers);
// Return the generator's input_or_debug_pos in the accumulator.
SetAccumulator(
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
index 7ad8d49b63..e44289bb6c 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
@@ -7,6 +7,7 @@
#include "src/allocation.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
+#include "src/factory-inl.h"
#include "src/frames.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter-assembler.h"
@@ -27,8 +28,8 @@ class IntrinsicsGenerator {
zone_(assembler->zone()),
assembler_(assembler) {}
- Node* InvokeIntrinsic(Node* function_id, Node* context, Node* first_arg_reg,
- Node* arg_count);
+ Node* InvokeIntrinsic(Node* function_id, Node* context,
+ const InterpreterAssembler::RegListNodePair& args);
private:
enum InstanceTypeCompareMode {
@@ -38,18 +39,21 @@ class IntrinsicsGenerator {
Node* IsInstanceType(Node* input, int type);
Node* CompareInstanceType(Node* map, int type, InstanceTypeCompareMode mode);
- Node* IntrinsicAsStubCall(Node* input, Node* context,
- Callable const& callable);
- Node* IntrinsicAsBuiltinCall(Node* input, Node* context, Builtins::Name name);
+ Node* IntrinsicAsStubCall(const InterpreterAssembler::RegListNodePair& args,
+ Node* context, Callable const& callable);
+ Node* IntrinsicAsBuiltinCall(
+ const InterpreterAssembler::RegListNodePair& args, Node* context,
+ Builtins::Name name);
void AbortIfArgCountMismatch(int expected, compiler::Node* actual);
#define DECLARE_INTRINSIC_HELPER(name, lower_case, count) \
- Node* name(Node* input, Node* arg_count, Node* context);
+ Node* name(const InterpreterAssembler::RegListNodePair& args, Node* context);
INTRINSICS_LIST(DECLARE_INTRINSIC_HELPER)
#undef DECLARE_INTRINSIC_HELPER
Isolate* isolate() { return isolate_; }
Zone* zone() { return zone_; }
+ Factory* factory() { return isolate()->factory(); }
Isolate* isolate_;
Zone* zone_;
@@ -58,19 +62,18 @@ class IntrinsicsGenerator {
DISALLOW_COPY_AND_ASSIGN(IntrinsicsGenerator);
};
-Node* GenerateInvokeIntrinsic(InterpreterAssembler* assembler,
- Node* function_id, Node* context,
- Node* first_arg_reg, Node* arg_count) {
+Node* GenerateInvokeIntrinsic(
+ InterpreterAssembler* assembler, Node* function_id, Node* context,
+ const InterpreterAssembler::RegListNodePair& args) {
IntrinsicsGenerator generator(assembler);
- return generator.InvokeIntrinsic(function_id, context, first_arg_reg,
- arg_count);
+ return generator.InvokeIntrinsic(function_id, context, args);
}
#define __ assembler_->
-Node* IntrinsicsGenerator::InvokeIntrinsic(Node* function_id, Node* context,
- Node* first_arg_reg,
- Node* arg_count) {
+Node* IntrinsicsGenerator::InvokeIntrinsic(
+ Node* function_id, Node* context,
+ const InterpreterAssembler::RegListNodePair& args) {
InterpreterAssembler::Label abort(assembler_), end(assembler_);
InterpreterAssembler::Variable result(assembler_,
MachineRepresentation::kTagged);
@@ -90,17 +93,17 @@ Node* IntrinsicsGenerator::InvokeIntrinsic(Node* function_id, Node* context,
#undef CASE
__ Switch(function_id, &abort, cases, labels, arraysize(cases));
-#define HANDLE_CASE(name, lower_case, expected_arg_count) \
- __ BIND(&lower_case); \
- { \
- if (FLAG_debug_code && expected_arg_count >= 0) { \
- AbortIfArgCountMismatch(expected_arg_count, arg_count); \
- } \
- Node* value = name(first_arg_reg, arg_count, context); \
- if (value) { \
- result.Bind(value); \
- __ Goto(&end); \
- } \
+#define HANDLE_CASE(name, lower_case, expected_arg_count) \
+ __ BIND(&lower_case); \
+ { \
+ if (FLAG_debug_code && expected_arg_count >= 0) { \
+ AbortIfArgCountMismatch(expected_arg_count, args.reg_count()); \
+ } \
+ Node* value = name(args, context); \
+ if (value) { \
+ result.Bind(value); \
+ __ Goto(&end); \
+ } \
}
INTRINSICS_LIST(HANDLE_CASE)
#undef HANDLE_CASE
@@ -129,238 +132,195 @@ Node* IntrinsicsGenerator::CompareInstanceType(Node* object, int type,
}
Node* IntrinsicsGenerator::IsInstanceType(Node* input, int type) {
- InterpreterAssembler::Variable return_value(assembler_,
- MachineRepresentation::kTagged);
- // TODO(ishell): Use Select here.
- InterpreterAssembler::Label if_not_smi(assembler_), return_true(assembler_),
- return_false(assembler_), end(assembler_);
- Node* arg = __ LoadRegister(input);
- __ GotoIf(__ TaggedIsSmi(arg), &return_false);
-
- Node* condition = CompareInstanceType(arg, type, kInstanceTypeEqual);
- __ Branch(condition, &return_true, &return_false);
-
- __ BIND(&return_true);
- {
- return_value.Bind(__ TrueConstant());
- __ Goto(&end);
- }
-
- __ BIND(&return_false);
- {
- return_value.Bind(__ FalseConstant());
- __ Goto(&end);
- }
-
- __ BIND(&end);
- return return_value.value();
-}
-
-Node* IntrinsicsGenerator::IsJSReceiver(Node* input, Node* arg_count,
- Node* context) {
- // TODO(ishell): Use Select here.
- // TODO(ishell): Use CSA::IsJSReceiverInstanceType here.
- InterpreterAssembler::Variable return_value(assembler_,
- MachineRepresentation::kTagged);
- InterpreterAssembler::Label return_true(assembler_), return_false(assembler_),
- end(assembler_);
-
- Node* arg = __ LoadRegister(input);
- __ GotoIf(__ TaggedIsSmi(arg), &return_false);
-
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- Node* condition = CompareInstanceType(arg, FIRST_JS_RECEIVER_TYPE,
- kInstanceTypeGreaterThanOrEqual);
- __ Branch(condition, &return_true, &return_false);
-
- __ BIND(&return_true);
- {
- return_value.Bind(__ TrueConstant());
- __ Goto(&end);
- }
-
- __ BIND(&return_false);
- {
- return_value.Bind(__ FalseConstant());
- __ Goto(&end);
- }
-
- __ BIND(&end);
- return return_value.value();
-}
-
-Node* IntrinsicsGenerator::IsArray(Node* input, Node* arg_count,
- Node* context) {
+ Node* result =
+ __ Select(__ TaggedIsSmi(input), [=] { return __ FalseConstant(); },
+ [=] {
+ return __ SelectBooleanConstant(
+ CompareInstanceType(input, type, kInstanceTypeEqual));
+ },
+ MachineRepresentation::kTagged);
+ return result;
+}
+
+Node* IntrinsicsGenerator::IsJSReceiver(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ Node* input = __ LoadRegisterFromRegisterList(args, 0);
+ Node* result = __ Select(
+ __ TaggedIsSmi(input), [=] { return __ FalseConstant(); },
+ [=] { return __ SelectBooleanConstant(__ IsJSReceiver(input)); },
+ MachineRepresentation::kTagged);
+ return result;
+}
+
+Node* IntrinsicsGenerator::IsArray(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ Node* input = __ LoadRegisterFromRegisterList(args, 0);
return IsInstanceType(input, JS_ARRAY_TYPE);
}
-Node* IntrinsicsGenerator::IsJSProxy(Node* input, Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::IsJSProxy(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ Node* input = __ LoadRegisterFromRegisterList(args, 0);
return IsInstanceType(input, JS_PROXY_TYPE);
}
-Node* IntrinsicsGenerator::IsTypedArray(Node* input, Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::IsTypedArray(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ Node* input = __ LoadRegisterFromRegisterList(args, 0);
return IsInstanceType(input, JS_TYPED_ARRAY_TYPE);
}
-Node* IntrinsicsGenerator::IsJSMap(Node* input, Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::IsJSMap(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ Node* input = __ LoadRegisterFromRegisterList(args, 0);
return IsInstanceType(input, JS_MAP_TYPE);
}
-Node* IntrinsicsGenerator::IsJSSet(Node* input, Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::IsJSSet(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ Node* input = __ LoadRegisterFromRegisterList(args, 0);
return IsInstanceType(input, JS_SET_TYPE);
}
-Node* IntrinsicsGenerator::IsJSWeakMap(Node* input, Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::IsJSWeakMap(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ Node* input = __ LoadRegisterFromRegisterList(args, 0);
return IsInstanceType(input, JS_WEAK_MAP_TYPE);
}
-Node* IntrinsicsGenerator::IsJSWeakSet(Node* input, Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::IsJSWeakSet(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ Node* input = __ LoadRegisterFromRegisterList(args, 0);
return IsInstanceType(input, JS_WEAK_SET_TYPE);
}
-Node* IntrinsicsGenerator::IsSmi(Node* input, Node* arg_count, Node* context) {
- // TODO(ishell): Use SelectBooleanConstant here.
- InterpreterAssembler::Variable return_value(assembler_,
- MachineRepresentation::kTagged);
- InterpreterAssembler::Label if_smi(assembler_), if_not_smi(assembler_),
- end(assembler_);
-
- Node* arg = __ LoadRegister(input);
-
- __ Branch(__ TaggedIsSmi(arg), &if_smi, &if_not_smi);
- __ BIND(&if_smi);
- {
- return_value.Bind(__ TrueConstant());
- __ Goto(&end);
- }
-
- __ BIND(&if_not_smi);
- {
- return_value.Bind(__ FalseConstant());
- __ Goto(&end);
- }
-
- __ BIND(&end);
- return return_value.value();
+Node* IntrinsicsGenerator::IsSmi(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ Node* input = __ LoadRegisterFromRegisterList(args, 0);
+ return __ SelectBooleanConstant(__ TaggedIsSmi(input));
}
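
On the intrinsics side, the hand-rolled Label/Variable control flow of IsInstanceType, IsJSReceiver and IsSmi collapses into the Select and SelectBooleanConstant combinators, which evaluate exactly one of two thunks and yield a single value. Functionally this is just a conditional expression; a toy equivalent for illustration:

  #include <functional>

  // Toy Select: evaluate exactly one of the two thunks depending on the
  // condition and return its result, the pattern the rewritten intrinsics use
  // instead of explicit labels, branches and a result variable.
  template <typename T>
  T Select(bool condition, const std::function<T()>& if_true,
           const std::function<T()>& if_false) {
    return condition ? if_true() : if_false();
  }

  // SelectBooleanConstant(c) is then just Select<bool> with constant thunks.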
-Node* IntrinsicsGenerator::IntrinsicAsStubCall(Node* args_reg, Node* context,
- Callable const& callable) {
+Node* IntrinsicsGenerator::IntrinsicAsStubCall(
+ const InterpreterAssembler::RegListNodePair& args, Node* context,
+ Callable const& callable) {
int param_count = callable.descriptor().GetParameterCount();
int input_count = param_count + 2; // +2 for target and context
- Node** args = zone()->NewArray<Node*>(input_count);
+ Node** stub_args = zone()->NewArray<Node*>(input_count);
int index = 0;
- args[index++] = __ HeapConstant(callable.code());
+ stub_args[index++] = __ HeapConstant(callable.code());
for (int i = 0; i < param_count; i++) {
- args[index++] = __ LoadRegister(args_reg);
- args_reg = __ NextRegister(args_reg);
+ stub_args[index++] = __ LoadRegisterFromRegisterList(args, i);
}
- args[index++] = context;
- return __ CallStubN(callable.descriptor(), 1, input_count, args);
+ stub_args[index++] = context;
+ return __ CallStubN(callable.descriptor(), 1, input_count, stub_args);
}
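
IntrinsicAsStubCall still assembles its stub arguments in the same order (code target first, then one entry per parameter register, then the context last); the local array is merely renamed to stub_args so it no longer shadows the incoming RegListNodePair. A toy version of that assembly, using plain integers in place of Node* values:

  #include <cstdint>
  #include <vector>

  // Toy version of the stub-call argument assembly above: target first, then
  // one entry per parameter register, then the context last.
  std::vector<intptr_t> BuildStubArgs(intptr_t code_target,
                                      const std::vector<intptr_t>& params,
                                      intptr_t context) {
    std::vector<intptr_t> stub_args;
    stub_args.reserve(params.size() + 2);  // +2 for target and context
    stub_args.push_back(code_target);
    stub_args.insert(stub_args.end(), params.begin(), params.end());
    stub_args.push_back(context);
    return stub_args;
  }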
-Node* IntrinsicsGenerator::IntrinsicAsBuiltinCall(Node* input, Node* context,
- Builtins::Name name) {
+Node* IntrinsicsGenerator::IntrinsicAsBuiltinCall(
+ const InterpreterAssembler::RegListNodePair& args, Node* context,
+ Builtins::Name name) {
Callable callable = Builtins::CallableFor(isolate_, name);
- return IntrinsicAsStubCall(input, context, callable);
+ return IntrinsicAsStubCall(args, context, callable);
}
-Node* IntrinsicsGenerator::CreateIterResultObject(Node* input, Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::CreateIterResultObject(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
return IntrinsicAsStubCall(
- input, context,
+ args, context,
Builtins::CallableFor(isolate(), Builtins::kCreateIterResultObject));
}
-Node* IntrinsicsGenerator::HasProperty(Node* input, Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::HasProperty(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
return IntrinsicAsStubCall(
- input, context, Builtins::CallableFor(isolate(), Builtins::kHasProperty));
+ args, context, Builtins::CallableFor(isolate(), Builtins::kHasProperty));
}
-Node* IntrinsicsGenerator::ToString(Node* input, Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::RejectPromise(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
return IntrinsicAsStubCall(
- input, context, Builtins::CallableFor(isolate(), Builtins::kToString));
+ args, context,
+ Builtins::CallableFor(isolate(), Builtins::kRejectPromise));
}
-Node* IntrinsicsGenerator::ToLength(Node* input, Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::ResolvePromise(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
return IntrinsicAsStubCall(
- input, context, Builtins::CallableFor(isolate(), Builtins::kToLength));
+ args, context,
+ Builtins::CallableFor(isolate(), Builtins::kResolvePromise));
}
-Node* IntrinsicsGenerator::ToInteger(Node* input, Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::ToString(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
return IntrinsicAsStubCall(
- input, context, Builtins::CallableFor(isolate(), Builtins::kToInteger));
+ args, context, Builtins::CallableFor(isolate(), Builtins::kToString));
}
-Node* IntrinsicsGenerator::ToNumber(Node* input, Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::ToLength(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
return IntrinsicAsStubCall(
- input, context, Builtins::CallableFor(isolate(), Builtins::kToNumber));
+ args, context, Builtins::CallableFor(isolate(), Builtins::kToLength));
}
-Node* IntrinsicsGenerator::ToObject(Node* input, Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::ToInteger(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
return IntrinsicAsStubCall(
- input, context, Builtins::CallableFor(isolate(), Builtins::kToObject));
+ args, context, Builtins::CallableFor(isolate(), Builtins::kToInteger));
}
-Node* IntrinsicsGenerator::Call(Node* args_reg, Node* arg_count,
- Node* context) {
- // First argument register contains the function target.
- Node* function = __ LoadRegister(args_reg);
+Node* IntrinsicsGenerator::ToNumber(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsStubCall(
+ args, context, Builtins::CallableFor(isolate(), Builtins::kToNumber));
+}
- // Receiver is the second runtime call argument.
- Node* receiver_reg = __ NextRegister(args_reg);
- Node* receiver_arg = __ RegisterLocation(receiver_reg);
+Node* IntrinsicsGenerator::ToObject(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsStubCall(
+ args, context, Builtins::CallableFor(isolate(), Builtins::kToObject));
+}
- // Subtract function and receiver from arg count.
- Node* function_and_receiver_count = __ Int32Constant(2);
- Node* target_args_count = __ Int32Sub(arg_count, function_and_receiver_count);
+Node* IntrinsicsGenerator::Call(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ // First argument register contains the function target.
+ Node* function = __ LoadRegisterFromRegisterList(args, 0);
+
+ // The arguments for the target function are from the second runtime call
+ // argument.
+ InterpreterAssembler::RegListNodePair target_args(
+ __ RegisterLocationInRegisterList(args, 1),
+ __ Int32Sub(args.reg_count(), __ Int32Constant(1)));
if (FLAG_debug_code) {
InterpreterAssembler::Label arg_count_positive(assembler_);
- Node* comparison = __ Int32LessThan(target_args_count, __ Int32Constant(0));
+ Node* comparison =
+ __ Int32LessThan(target_args.reg_count(), __ Int32Constant(0));
__ GotoIfNot(comparison, &arg_count_positive);
__ Abort(AbortReason::kWrongArgumentCountForInvokeIntrinsic);
__ Goto(&arg_count_positive);
__ BIND(&arg_count_positive);
}
- __ CallJSAndDispatch(function, context, receiver_arg, target_args_count,
+ __ CallJSAndDispatch(function, context, target_args,
ConvertReceiverMode::kAny);
return nullptr; // We never return from the CallJSAndDispatch above.
}
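
The %_Call intrinsic now receives the whole register list as a RegListNodePair: the callee sits at index 0, and the receiver plus arguments are the tail of the list, obtained via RegisterLocationInRegisterList(args, 1) with a count of reg_count minus 1 (the debug-only check only guards against that count going negative). The slice, in toy form with a made-up CallWindow type:

  // Toy slice: given the base register and count of the whole %_Call register
  // list, return the base/count of the receiver-plus-arguments part.
  struct CallWindow {
    int base_reg;
    int reg_count;
  };

  CallWindow TargetArgs(int list_base_reg, int list_reg_count) {
    // Index 0 holds the callee; everything after it is receiver + arguments.
    return CallWindow{list_base_reg + 1, list_reg_count - 1};
  }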
-Node* IntrinsicsGenerator::ClassOf(Node* args_reg, Node* arg_count,
- Node* context) {
- Node* value = __ LoadRegister(args_reg);
- return __ ClassOf(value);
-}
-
-Node* IntrinsicsGenerator::CreateAsyncFromSyncIterator(Node* args_reg,
- Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::CreateAsyncFromSyncIterator(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
InterpreterAssembler::Label not_receiver(
assembler_, InterpreterAssembler::Label::kDeferred);
InterpreterAssembler::Label done(assembler_);
InterpreterAssembler::Variable return_value(assembler_,
MachineRepresentation::kTagged);
- Node* sync_iterator = __ LoadRegister(args_reg);
+ Node* sync_iterator = __ LoadRegisterFromRegisterList(args, 0);
__ GotoIf(__ TaggedIsSmi(sync_iterator), &not_receiver);
__ GotoIfNot(__ IsJSReceiver(sync_iterator), &not_receiver);
+ Node* const next =
+ __ GetProperty(context, sync_iterator, factory()->next_string());
+
Node* const native_context = __ LoadNativeContext(context);
Node* const map = __ LoadContextElement(
native_context, Context::ASYNC_FROM_SYNC_ITERATOR_MAP_INDEX);
@@ -368,6 +328,8 @@ Node* IntrinsicsGenerator::CreateAsyncFromSyncIterator(Node* args_reg,
__ StoreObjectFieldNoWriteBarrier(
iterator, JSAsyncFromSyncIterator::kSyncIteratorOffset, sync_iterator);
+ __ StoreObjectFieldNoWriteBarrier(iterator,
+ JSAsyncFromSyncIterator::kNextOffset, next);
return_value.Bind(iterator);
__ Goto(&done);
@@ -385,52 +347,41 @@ Node* IntrinsicsGenerator::CreateAsyncFromSyncIterator(Node* args_reg,
return return_value.value();
}
-Node* IntrinsicsGenerator::CreateJSGeneratorObject(Node* input, Node* arg_count,
- Node* context) {
- return IntrinsicAsBuiltinCall(input, context,
+Node* IntrinsicsGenerator::CreateJSGeneratorObject(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context,
Builtins::kCreateGeneratorObject);
}
-Node* IntrinsicsGenerator::GeneratorGetContext(Node* args_reg, Node* arg_count,
- Node* context) {
- Node* generator = __ LoadRegister(args_reg);
- Node* const value =
- __ LoadObjectField(generator, JSGeneratorObject::kContextOffset);
-
- return value;
-}
-
-Node* IntrinsicsGenerator::GeneratorGetInputOrDebugPos(Node* args_reg,
- Node* arg_count,
- Node* context) {
- Node* generator = __ LoadRegister(args_reg);
+Node* IntrinsicsGenerator::GeneratorGetInputOrDebugPos(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ Node* generator = __ LoadRegisterFromRegisterList(args, 0);
Node* const value =
__ LoadObjectField(generator, JSGeneratorObject::kInputOrDebugPosOffset);
return value;
}
-Node* IntrinsicsGenerator::GeneratorGetResumeMode(Node* args_reg,
- Node* arg_count,
- Node* context) {
- Node* generator = __ LoadRegister(args_reg);
+Node* IntrinsicsGenerator::GeneratorGetResumeMode(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ Node* generator = __ LoadRegisterFromRegisterList(args, 0);
Node* const value =
__ LoadObjectField(generator, JSGeneratorObject::kResumeModeOffset);
return value;
}
-Node* IntrinsicsGenerator::GeneratorClose(Node* args_reg, Node* arg_count,
- Node* context) {
- Node* generator = __ LoadRegister(args_reg);
+Node* IntrinsicsGenerator::GeneratorClose(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ Node* generator = __ LoadRegisterFromRegisterList(args, 0);
__ StoreObjectFieldNoWriteBarrier(
generator, JSGeneratorObject::kContinuationOffset,
__ SmiConstant(JSGeneratorObject::kGeneratorClosed));
return __ UndefinedConstant();
}
-Node* IntrinsicsGenerator::GetImportMetaObject(Node* args_reg, Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::GetImportMetaObject(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
Node* const module_context = __ LoadModuleContext(context);
Node* const module =
__ LoadContextElement(module_context, Context::EXTENSION_INDEX);
@@ -451,21 +402,44 @@ Node* IntrinsicsGenerator::GetImportMetaObject(Node* args_reg, Node* arg_count,
return return_value.value();
}
-Node* IntrinsicsGenerator::AsyncGeneratorReject(Node* input, Node* arg_count,
- Node* context) {
- return IntrinsicAsBuiltinCall(input, context,
- Builtins::kAsyncGeneratorReject);
+Node* IntrinsicsGenerator::AsyncFunctionAwaitCaught(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context,
+ Builtins::kAsyncFunctionAwaitCaught);
+}
+
+Node* IntrinsicsGenerator::AsyncFunctionAwaitUncaught(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context,
+ Builtins::kAsyncFunctionAwaitUncaught);
+}
+
+Node* IntrinsicsGenerator::AsyncGeneratorAwaitCaught(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context,
+ Builtins::kAsyncGeneratorAwaitCaught);
+}
+
+Node* IntrinsicsGenerator::AsyncGeneratorAwaitUncaught(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context,
+ Builtins::kAsyncGeneratorAwaitUncaught);
+}
+
+Node* IntrinsicsGenerator::AsyncGeneratorReject(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncGeneratorReject);
}
-Node* IntrinsicsGenerator::AsyncGeneratorResolve(Node* input, Node* arg_count,
- Node* context) {
- return IntrinsicAsBuiltinCall(input, context,
+Node* IntrinsicsGenerator::AsyncGeneratorResolve(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context,
Builtins::kAsyncGeneratorResolve);
}
-Node* IntrinsicsGenerator::AsyncGeneratorYield(Node* input, Node* arg_count,
- Node* context) {
- return IntrinsicAsBuiltinCall(input, context, Builtins::kAsyncGeneratorYield);
+Node* IntrinsicsGenerator::AsyncGeneratorYield(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncGeneratorYield);
}
void IntrinsicsGenerator::AbortIfArgCountMismatch(int expected, Node* actual) {
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.h b/deps/v8/src/interpreter/interpreter-intrinsics-generator.h
index 11442438d5..fd4e167ed0 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.h
+++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.h
@@ -5,6 +5,8 @@
#ifndef V8_INTERPRETER_INTERPRETER_INTRINSICS_GENERATOR_H_
#define V8_INTERPRETER_INTERPRETER_INTRINSICS_GENERATOR_H_
+#include "src/interpreter/interpreter-assembler.h"
+
namespace v8 {
namespace internal {
@@ -14,13 +16,9 @@ class Node;
namespace interpreter {
-class InterpreterAssembler;
-
-extern compiler::Node* GenerateInvokeIntrinsic(InterpreterAssembler* assembler,
- compiler::Node* function_id,
- compiler::Node* context,
- compiler::Node* first_arg_reg,
- compiler::Node* arg_count);
+extern compiler::Node* GenerateInvokeIntrinsic(
+ InterpreterAssembler* assembler, compiler::Node* function_id,
+ compiler::Node* context, const InterpreterAssembler::RegListNodePair& args);
} // namespace interpreter
} // namespace internal
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.h b/deps/v8/src/interpreter/interpreter-intrinsics.h
index b9137c8559..6cdfec2d04 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics.h
+++ b/deps/v8/src/interpreter/interpreter-intrinsics.h
@@ -14,17 +14,19 @@ namespace interpreter {
// List of supported intrinsics, with upper case name, lower case name and
// expected number of arguments (-1 denoting argument count is variable).
#define INTRINSICS_LIST(V) \
+ V(AsyncFunctionAwaitCaught, async_function_await_caught, 3) \
+ V(AsyncFunctionAwaitUncaught, async_function_await_uncaught, 3) \
+ V(AsyncGeneratorAwaitCaught, async_generator_await_caught, 2) \
+ V(AsyncGeneratorAwaitUncaught, async_generator_await_uncaught, 2) \
V(AsyncGeneratorReject, async_generator_reject, 2) \
V(AsyncGeneratorResolve, async_generator_resolve, 3) \
V(AsyncGeneratorYield, async_generator_yield, 3) \
V(CreateJSGeneratorObject, create_js_generator_object, 2) \
- V(GeneratorGetContext, generator_get_context, 1) \
V(GeneratorGetResumeMode, generator_get_resume_mode, 1) \
V(GeneratorGetInputOrDebugPos, generator_get_input_or_debug_pos, 1) \
V(GeneratorClose, generator_close, 1) \
V(GetImportMetaObject, get_import_meta_object, 0) \
V(Call, call, -1) \
- V(ClassOf, class_of, 1) \
V(CreateIterResultObject, create_iter_result_object, 2) \
V(CreateAsyncFromSyncIterator, create_async_from_sync_iterator, 1) \
V(HasProperty, has_property, 2) \
@@ -37,6 +39,8 @@ namespace interpreter {
V(IsJSWeakSet, is_js_weak_set, 1) \
V(IsSmi, is_smi, 1) \
V(IsTypedArray, is_typed_array, 1) \
+ V(RejectPromise, reject_promise, 3) \
+ V(ResolvePromise, resolve_promise, 2) \
V(ToString, to_string, 1) \
V(ToLength, to_length, 1) \
V(ToInteger, to_integer, 1) \
@@ -65,4 +69,4 @@ class IntrinsicsHelper {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_INTERPRETER_INTERPRETER_INTRINSICS_H_
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index fb74d37df4..0702536b3d 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -29,7 +29,8 @@ namespace interpreter {
class InterpreterCompilationJob final : public CompilationJob {
public:
InterpreterCompilationJob(ParseInfo* parse_info, FunctionLiteral* literal,
- AccountingAllocator* allocator);
+ AccountingAllocator* allocator,
+ ZoneVector<FunctionLiteral*>* eager_inner_literals);
protected:
Status PrepareJobImpl(Isolate* isolate) final;
@@ -66,11 +67,6 @@ Code* Interpreter::GetAndMaybeDeserializeBytecodeHandler(
if (!isolate_->heap()->IsDeserializeLazyHandler(code)) return code;
DCHECK(FLAG_lazy_handler_deserialization);
- if (FLAG_trace_lazy_deserialization) {
- PrintF("Lazy-deserializing handler %s\n",
- Bytecodes::ToString(bytecode, operand_scale).c_str());
- }
-
DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
code = Snapshot::DeserializeHandler(isolate_, bytecode, operand_scale);
@@ -123,13 +119,17 @@ void Interpreter::IterateDispatchTable(RootVisitor* v) {
? nullptr
: Code::GetCodeFromTargetAddress(code_entry);
Object* old_code = code;
- v->VisitRootPointer(Root::kDispatchTable, &code);
+ v->VisitRootPointer(Root::kDispatchTable, nullptr, &code);
if (code != old_code) {
dispatch_table_[i] = reinterpret_cast<Code*>(code)->entry();
}
}
}
+int Interpreter::InterruptBudget() {
+ return FLAG_interrupt_budget;
+}
+
namespace {
void MaybePrintAst(ParseInfo* parse_info, CompilationInfo* compilation_info) {
@@ -163,12 +163,14 @@ bool ShouldPrintBytecode(Handle<SharedFunctionInfo> shared) {
InterpreterCompilationJob::InterpreterCompilationJob(
ParseInfo* parse_info, FunctionLiteral* literal,
- AccountingAllocator* allocator)
+ AccountingAllocator* allocator,
+ ZoneVector<FunctionLiteral*>* eager_inner_literals)
: CompilationJob(parse_info->stack_limit(), parse_info, &compilation_info_,
"Ignition", State::kReadyToExecute),
zone_(allocator, ZONE_NAME),
compilation_info_(&zone_, parse_info, literal),
- generator_(&compilation_info_, parse_info->ast_string_constants()) {}
+ generator_(&compilation_info_, parse_info->ast_string_constants(),
+ eager_inner_literals) {}
InterpreterCompilationJob::Status InterpreterCompilationJob::PrepareJobImpl(
Isolate* isolate) {
@@ -226,10 +228,12 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl(
return SUCCEEDED;
}
-CompilationJob* Interpreter::NewCompilationJob(ParseInfo* parse_info,
- FunctionLiteral* literal,
- AccountingAllocator* allocator) {
- return new InterpreterCompilationJob(parse_info, literal, allocator);
+CompilationJob* Interpreter::NewCompilationJob(
+ ParseInfo* parse_info, FunctionLiteral* literal,
+ AccountingAllocator* allocator,
+ ZoneVector<FunctionLiteral*>* eager_inner_literals) {
+ return new InterpreterCompilationJob(parse_info, literal, allocator,
+ eager_inner_literals);
}
bool Interpreter::IsDispatchTableInitialized() const {
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index 7e6d013a29..83dfea89f9 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -27,6 +27,8 @@ class FunctionLiteral;
class ParseInfo;
class RootVisitor;
class SetupIsolateDelegate;
+template <typename>
+class ZoneVector;
namespace interpreter {
@@ -37,10 +39,16 @@ class Interpreter {
explicit Interpreter(Isolate* isolate);
virtual ~Interpreter() {}
+ // Returns the interrupt budget which should be used for the profiler counter.
+ static int InterruptBudget();
+
// Creates a compilation job which will generate bytecode for |literal|.
- static CompilationJob* NewCompilationJob(ParseInfo* parse_info,
- FunctionLiteral* literal,
- AccountingAllocator* allocator);
+ // Additionally, if |eager_inner_literals| is not null, adds any eagerly
+ // compilable inner FunctionLiterals to this list.
+ static CompilationJob* NewCompilationJob(
+ ParseInfo* parse_info, FunctionLiteral* literal,
+ AccountingAllocator* allocator,
+ ZoneVector<FunctionLiteral*>* eager_inner_literals);
// If the bytecode handler for |bytecode| and |operand_scale| has not yet
// been loaded, deserialize it. Then return the handler.
@@ -72,9 +80,6 @@ class Interpreter {
return reinterpret_cast<Address>(bytecode_dispatch_counters_table_.get());
}
- // The interrupt budget which should be used for the profiler counter.
- static const int kInterruptBudget = 144 * KB;
-
private:
friend class SetupInterpreter;
friend class v8::internal::SetupIsolateDelegate;
diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/isolate-inl.h
index f51c1cd29a..9da1aa4110 100644
--- a/deps/v8/src/isolate-inl.h
+++ b/deps/v8/src/isolate-inl.h
@@ -131,7 +131,7 @@ bool Isolate::IsArrayConstructorIntact() {
return array_constructor_cell->value() == Smi::FromInt(kProtectorValid);
}
-bool Isolate::IsArraySpeciesLookupChainIntact() {
+bool Isolate::IsSpeciesLookupChainIntact() {
// Note: It would be nice to have debug checks to make sure that the
// species protector is accurate, but this would be hard to do for most of
// what the protector stands for:
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 7165d88d34..38506bfc25 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -20,6 +20,8 @@
#include "src/base/utils/random-number-generator.h"
#include "src/basic-block-profiler.h"
#include "src/bootstrapper.h"
+#include "src/builtins/constants-table-builder.h"
+#include "src/callable.h"
#include "src/cancelable-task.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
@@ -32,6 +34,7 @@
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
#include "src/ic/stub-cache.h"
+#include "src/instruction-stream.h"
#include "src/interface-descriptors.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
@@ -39,6 +42,7 @@
#include "src/log.h"
#include "src/messages.h"
#include "src/objects/frame-array-inl.h"
+#include "src/objects/promise-inl.h"
#include "src/profiler/cpu-profiler.h"
#include "src/prototype.h"
#include "src/regexp/regexp-stack.h"
@@ -110,8 +114,6 @@ void ThreadLocalTop::InitializeInternal() {
rethrowing_message_ = false;
pending_message_obj_ = nullptr;
scheduled_exception_ = nullptr;
- microtask_queue_bailout_index_ = -1;
- microtask_queue_bailout_count_ = 0;
}
@@ -221,16 +223,19 @@ void Isolate::IterateThread(ThreadVisitor* v, char* t) {
void Isolate::Iterate(RootVisitor* v, ThreadLocalTop* thread) {
// Visit the roots from the top for a given thread.
- v->VisitRootPointer(Root::kTop, &thread->pending_exception_);
- v->VisitRootPointer(Root::kTop, &thread->wasm_caught_exception_);
- v->VisitRootPointer(Root::kTop, &thread->pending_message_obj_);
- v->VisitRootPointer(Root::kTop, bit_cast<Object**>(&(thread->context_)));
- v->VisitRootPointer(Root::kTop, &thread->scheduled_exception_);
+ v->VisitRootPointer(Root::kTop, nullptr, &thread->pending_exception_);
+ v->VisitRootPointer(Root::kTop, nullptr, &thread->wasm_caught_exception_);
+ v->VisitRootPointer(Root::kTop, nullptr, &thread->pending_message_obj_);
+ v->VisitRootPointer(Root::kTop, nullptr,
+ bit_cast<Object**>(&(thread->context_)));
+ v->VisitRootPointer(Root::kTop, nullptr, &thread->scheduled_exception_);
for (v8::TryCatch* block = thread->try_catch_handler(); block != nullptr;
block = block->next_) {
- v->VisitRootPointer(Root::kTop, bit_cast<Object**>(&(block->exception_)));
- v->VisitRootPointer(Root::kTop, bit_cast<Object**>(&(block->message_obj_)));
+ v->VisitRootPointer(Root::kTop, nullptr,
+ bit_cast<Object**>(&(block->exception_)));
+ v->VisitRootPointer(Root::kTop, nullptr,
+ bit_cast<Object**>(&(block->message_obj_)));
}
// Iterate over pointers on native execution stack.
@@ -312,61 +317,44 @@ Handle<String> Isolate::StackTraceString() {
}
}
-void Isolate::PushStackTraceAndDie(unsigned int magic1, void* ptr1, void* ptr2,
- unsigned int magic2) {
- PushStackTraceAndDie(magic1, ptr1, ptr2, nullptr, nullptr, nullptr, nullptr,
- nullptr, nullptr, magic2);
+void Isolate::PushStackTraceAndDie(void* ptr1, void* ptr2, void* ptr3,
+ void* ptr4) {
+ StackTraceFailureMessage message(this, ptr1, ptr2, ptr3, ptr4);
+ message.Print();
+ base::OS::Abort();
}
-void Isolate::PushStackTraceAndDie(unsigned int magic1, void* ptr1, void* ptr2,
- void* ptr3, void* ptr4, void* ptr5,
- void* ptr6, void* ptr7, void* ptr8,
- unsigned int magic2) {
- const int kMaxStackTraceSize = 32 * KB;
- Handle<String> trace = StackTraceString();
- uint8_t buffer[kMaxStackTraceSize];
- int length = Min(kMaxStackTraceSize - 1, trace->length());
- String::WriteToFlat(*trace, buffer, 0, length);
- buffer[length] = '\0';
- // TODO(dcarney): convert buffer to utf8?
- base::OS::PrintError(
- "Stacktrace:"
- "\n magic1=%x magic2=%x ptr1=%p ptr2=%p ptr3=%p ptr4=%p ptr5=%p "
- "ptr6=%p ptr7=%p ptr8=%p\n\n%s",
- magic1, magic2, ptr1, ptr2, ptr3, ptr4, ptr5, ptr6, ptr7, ptr8,
- reinterpret_cast<char*>(buffer));
- PushCodeObjectsAndDie(0xDEADC0DE, ptr1, ptr2, ptr3, ptr4, ptr5, ptr6, ptr7,
- ptr8, 0xDEADC0DE);
-}
-
-void Isolate::PushCodeObjectsAndDie(unsigned int magic1, void* ptr1, void* ptr2,
- void* ptr3, void* ptr4, void* ptr5,
- void* ptr6, void* ptr7, void* ptr8,
- unsigned int magic2) {
- const int kMaxCodeObjects = 16;
- // Mark as volatile to lower the probability of optimizing code_objects
- // away. The first and last entries are set to the magic markers, making it
- // easier to spot the array on the stack.
- void* volatile code_objects[kMaxCodeObjects + 2];
- code_objects[0] = reinterpret_cast<void*>(magic1);
- code_objects[kMaxCodeObjects + 1] = reinterpret_cast<void*>(magic2);
- StackFrameIterator it(this);
- int numCodeObjects = 0;
- for (; !it.done() && numCodeObjects < kMaxCodeObjects; it.Advance()) {
- code_objects[1 + numCodeObjects++] = it.frame()->unchecked_code();
- }
-
- // Keep the top raw code object pointers on the stack in the hope that the
- // corresponding pages end up more frequently in the minidump.
+void StackTraceFailureMessage::Print() volatile {
+ // Print the details of this failure message object, including its own address
+ // to force stack allocation.
base::OS::PrintError(
- "\nCodeObjects (%p length=%i): 1:%p 2:%p 3:%p 4:%p..."
- "\n magic1=%x magic2=%x ptr1=%p ptr2=%p ptr3=%p ptr4=%p ptr5=%p "
- "ptr6=%p ptr7=%p ptr8=%p\n\n",
- static_cast<void*>(code_objects[0]), numCodeObjects,
- static_cast<void*>(code_objects[1]), static_cast<void*>(code_objects[2]),
- static_cast<void*>(code_objects[3]), static_cast<void*>(code_objects[4]),
- magic1, magic2, ptr1, ptr2, ptr3, ptr4, ptr5, ptr6, ptr7, ptr8);
- base::OS::Abort();
+ "Stacktrace:\n ptr1=%p\n ptr2=%p\n ptr3=%p\n ptr4=%p\n "
+ "failure_message_object=%p\n%s",
+ ptr1_, ptr2_, ptr3_, ptr4_, this, &js_stack_trace_[0]);
+}
+
+StackTraceFailureMessage::StackTraceFailureMessage(Isolate* isolate, void* ptr1,
+ void* ptr2, void* ptr3,
+ void* ptr4) {
+ isolate_ = isolate;
+ ptr1_ = ptr1;
+ ptr2_ = ptr2;
+ ptr3_ = ptr3;
+ ptr4_ = ptr4;
+ // Write a stack trace into the {js_stack_trace_} buffer.
+ const size_t buffer_length = arraysize(js_stack_trace_);
+ memset(&js_stack_trace_, 0, buffer_length);
+ FixedStringAllocator fixed(&js_stack_trace_[0], buffer_length - 1);
+ StringStream accumulator(&fixed, StringStream::kPrintObjectConcise);
+ isolate->PrintStack(&accumulator, Isolate::kPrintStackVerbose);
+ // Keeping a reference to the last code objects to increase likelihood that
+ // they get included in the minidump.
+ const size_t code_objects_length = arraysize(code_objects_);
+ size_t i = 0;
+ StackFrameIterator it(isolate);
+ for (; !it.done() && i < code_objects_length; it.Advance()) {
+ code_objects_[i++] = it.frame()->unchecked_code();
+ }
}
namespace {
@@ -717,16 +705,16 @@ class CaptureStackTraceHelper {
int code_offset;
Handle<ByteArray> source_position_table;
Handle<Object> maybe_cache;
- Handle<NumberDictionary> cache;
+ Handle<SimpleNumberDictionary> cache;
if (!FLAG_optimize_for_size) {
code_offset = summ.code_offset();
source_position_table =
handle(summ.abstract_code()->source_position_table(), isolate_);
maybe_cache = handle(summ.abstract_code()->stack_frame_cache(), isolate_);
- if (maybe_cache->IsNumberDictionary()) {
- cache = Handle<NumberDictionary>::cast(maybe_cache);
+ if (maybe_cache->IsSimpleNumberDictionary()) {
+ cache = Handle<SimpleNumberDictionary>::cast(maybe_cache);
} else {
- cache = NumberDictionary::New(isolate_, 1);
+ cache = SimpleNumberDictionary::New(isolate_, 1);
}
int entry = cache->FindEntry(code_offset);
if (entry != NumberDictionary::kNotFound) {
@@ -759,7 +747,7 @@ class CaptureStackTraceHelper {
frame->set_is_constructor(summ.is_constructor());
frame->set_is_wasm(false);
if (!FLAG_optimize_for_size) {
- auto new_cache = NumberDictionary::Set(cache, code_offset, frame);
+ auto new_cache = SimpleNumberDictionary::Set(cache, code_offset, frame);
if (*new_cache != *cache || !maybe_cache->IsNumberDictionary()) {
AbstractCode::SetStackFrameCache(summ.abstract_code(), new_cache);
}
@@ -1278,10 +1266,11 @@ Object* Isolate::UnwindAndFindHandler() {
// Gather information from the handler.
Code* code = frame->LookupCode();
- return FoundHandler(
- nullptr, code->instruction_start(),
- Smi::ToInt(code->handler_table()->get(0)), code->constant_pool(),
- handler->address() + StackHandlerConstants::kSize, 0);
+ HandlerTable table(code);
+ return FoundHandler(nullptr, code->InstructionStart(),
+ table.LookupReturn(0), code->constant_pool(),
+ handler->address() + StackHandlerConstants::kSize,
+ 0);
}
case StackFrame::WASM_COMPILED: {
@@ -1348,7 +1337,7 @@ Object* Isolate::UnwindAndFindHandler() {
set_deoptimizer_lazy_throw(true);
}
- return FoundHandler(nullptr, code->instruction_start(), offset,
+ return FoundHandler(nullptr, code->InstructionStart(), offset,
code->constant_pool(), return_sp, frame->fp());
}
@@ -1358,7 +1347,7 @@ Object* Isolate::UnwindAndFindHandler() {
StubFrame* stub_frame = static_cast<StubFrame*>(frame);
Code* code = stub_frame->LookupCode();
if (!code->IsCode() || code->kind() != Code::BUILTIN ||
- !code->handler_table()->length() || !code->is_turbofanned()) {
+ !code->handler_table_offset() || !code->is_turbofanned()) {
break;
}
@@ -1372,7 +1361,7 @@ Object* Isolate::UnwindAndFindHandler() {
StandardFrameConstants::kFixedFrameSizeAboveFp -
stack_slots * kPointerSize;
- return FoundHandler(nullptr, code->instruction_start(), offset,
+ return FoundHandler(nullptr, code->InstructionStart(), offset,
code->constant_pool(), return_sp, frame->fp());
}
@@ -1405,7 +1394,7 @@ Object* Isolate::UnwindAndFindHandler() {
Code* code =
builtins()->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
- return FoundHandler(context, code->instruction_start(), 0,
+ return FoundHandler(context, code->InstructionStart(), 0,
code->constant_pool(), return_sp, frame->fp());
}
@@ -1425,7 +1414,7 @@ Object* Isolate::UnwindAndFindHandler() {
WasmInterpreterEntryFrame* interpreter_frame =
WasmInterpreterEntryFrame::cast(frame);
// TODO(wasm): Implement try-catch in the interpreter.
- interpreter_frame->wasm_instance()->debug_info()->Unwind(frame->fp());
+ interpreter_frame->debug_info()->Unwind(frame->fp());
} break;
default:
@@ -1468,9 +1457,8 @@ HandlerTable::CatchPrediction PredictException(JavaScriptFrame* frame) {
// Must have been constructed from a bytecode array.
CHECK_EQ(AbstractCode::INTERPRETED_FUNCTION, code->kind());
int code_offset = summary.code_offset();
- BytecodeArray* bytecode = code->GetBytecodeArray();
- HandlerTable* table = HandlerTable::cast(bytecode->handler_table());
- int index = table->LookupRange(code_offset, nullptr, &prediction);
+ HandlerTable table(code->GetBytecodeArray());
+ int index = table.LookupRange(code_offset, nullptr, &prediction);
if (index <= 0) continue;
if (prediction == HandlerTable::UNCAUGHT) continue;
return prediction;
@@ -1534,7 +1522,7 @@ Isolate::CatchType Isolate::PredictExceptionCatcher() {
case StackFrame::STUB: {
Handle<Code> code(frame->LookupCode());
if (!code->IsCode() || code->kind() != Code::BUILTIN ||
- !code->handler_table()->length() || !code->is_turbofanned()) {
+ !code->handler_table_offset() || !code->is_turbofanned()) {
break;
}
@@ -2013,6 +2001,7 @@ void Isolate::PopPromise() {
}
namespace {
+
bool InternalPromiseHasUserDefinedRejectHandler(Isolate* isolate,
Handle<JSPromise> promise);
@@ -2058,39 +2047,28 @@ bool InternalPromiseHasUserDefinedRejectHandler(Isolate* isolate,
return true;
}
- Handle<Object> queue(promise->reject_reactions(), isolate);
- Handle<Object> deferred_promise(promise->deferred_promise(), isolate);
-
- if (queue->IsUndefined(isolate)) {
- return false;
- }
-
- if (queue->IsCallable()) {
- return PromiseHandlerCheck(isolate, Handle<JSReceiver>::cast(queue),
- Handle<JSReceiver>::cast(deferred_promise));
- }
-
- if (queue->IsSymbol()) {
- return InternalPromiseHasUserDefinedRejectHandler(
- isolate, Handle<JSPromise>::cast(deferred_promise));
- }
-
- Handle<FixedArray> queue_arr = Handle<FixedArray>::cast(queue);
- Handle<FixedArray> deferred_promise_arr =
- Handle<FixedArray>::cast(deferred_promise);
- for (int i = 0; i < deferred_promise_arr->length(); i++) {
- Handle<JSReceiver> deferred_promise_item(
- JSReceiver::cast(deferred_promise_arr->get(i)));
- if (queue_arr->get(i)->IsSymbol()) {
- if (InternalPromiseHasUserDefinedRejectHandler(
- isolate, Handle<JSPromise>::cast(deferred_promise_item))) {
- return true;
- }
- } else {
- Handle<JSReceiver> queue_item(JSReceiver::cast(queue_arr->get(i)));
- if (PromiseHandlerCheck(isolate, queue_item, deferred_promise_item)) {
- return true;
+ if (promise->status() == Promise::kPending) {
+ Handle<Object> current(promise->reactions(), isolate);
+ while (!current->IsSmi()) {
+ Handle<PromiseReaction> current_reaction =
+ Handle<PromiseReaction>::cast(current);
+ Handle<HeapObject> payload(current_reaction->payload(), isolate);
+ Handle<JSPromise> current_promise;
+ if (JSPromise::From(payload).ToHandle(&current_promise)) {
+ if (current_reaction->reject_handler()->IsCallable()) {
+ Handle<JSReceiver> current_handler(
+ JSReceiver::cast(current_reaction->reject_handler()), isolate);
+ if (PromiseHandlerCheck(isolate, current_handler, current_promise)) {
+ return true;
+ }
+ } else {
+ if (InternalPromiseHasUserDefinedRejectHandler(isolate,
+ current_promise)) {
+ return true;
+ }
+ }
}
+ current = handle(current_reaction->next(), isolate);
}
}
@@ -2124,7 +2102,7 @@ Handle<Object> Isolate::GetPromiseOnStackOnThrow() {
} else if (frame->type() == StackFrame::STUB) {
Code* code = frame->LookupCode();
if (!code->IsCode() || code->kind() != Code::BUILTIN ||
- !code->handler_table()->length() || !code->is_turbofanned()) {
+ !code->handler_table_offset() || !code->is_turbofanned()) {
continue;
}
catch_prediction = code->GetBuiltinCatchPrediction();
@@ -2617,6 +2595,8 @@ void Isolate::ClearSerializerData() {
void Isolate::Deinit() {
TRACE_ISOLATE(deinit);
+ // Make sure that the GC does not post any new tasks.
+ heap_.stop_using_tasks();
debug()->Unload();
if (concurrent_recompilation_enabled()) {
@@ -2665,7 +2645,7 @@ void Isolate::Deinit() {
delete heap_profiler_;
heap_profiler_ = nullptr;
- compiler_dispatcher_->AbortAll(CompilerDispatcher::BlockingBehavior::kBlock);
+ compiler_dispatcher_->AbortAll(BlockingBehavior::kBlock);
delete compiler_dispatcher_;
compiler_dispatcher_ = nullptr;
@@ -2689,6 +2669,12 @@ void Isolate::Deinit() {
root_index_map_ = nullptr;
ClearSerializerData();
+
+ for (InstructionStream* stream : off_heap_code_) {
+ CHECK(FLAG_stress_off_heap_code);
+ delete stream;
+ }
+ off_heap_code_.clear();
}
@@ -2774,11 +2760,6 @@ Isolate::~Isolate() {
delete allocator_;
allocator_ = nullptr;
-
-#if USE_SIMULATOR
- Simulator::TearDown(simulator_i_cache_);
- simulator_i_cache_ = nullptr;
-#endif
}
@@ -2845,6 +2826,121 @@ void PrintBuiltinSizes(Isolate* isolate) {
code->instruction_size());
}
}
+
+#ifdef V8_EMBEDDED_BUILTINS
+#ifdef DEBUG
+bool BuiltinAliasesOffHeapTrampolineRegister(Isolate* isolate,
+ int builtin_index) {
+ switch (Builtins::KindOf(builtin_index)) {
+ case Builtins::CPP:
+ case Builtins::TFC:
+ case Builtins::TFH:
+ case Builtins::TFJ:
+ case Builtins::TFS:
+ break;
+ case Builtins::API:
+ case Builtins::ASM:
+ // TODO(jgruber): Extend checks to remaining kinds.
+ return false;
+ }
+
+ Callable callable = Builtins::CallableFor(
+ isolate, static_cast<Builtins::Name>(builtin_index));
+ CallInterfaceDescriptor descriptor = callable.descriptor();
+
+ if (descriptor.ContextRegister() == kOffHeapTrampolineRegister) {
+ return true;
+ }
+
+ for (int i = 0; i < descriptor.GetRegisterParameterCount(); i++) {
+ Register reg = descriptor.GetRegisterParameter(i);
+ if (reg == kOffHeapTrampolineRegister) return true;
+ }
+
+ return false;
+}
+#endif
+
+void ChangeToOffHeapTrampoline(Isolate* isolate, Handle<Code> code,
+ InstructionStream* stream) {
+ DCHECK(Builtins::IsOffHeapSafe(code->builtin_index()));
+ HandleScope scope(isolate);
+
+ constexpr size_t buffer_size = 256; // Enough to fit the single jmp.
+ byte buffer[buffer_size]; // NOLINT(runtime/arrays)
+
+ // Generate replacement code that simply tail-calls the off-heap code.
+ MacroAssembler masm(isolate, buffer, buffer_size, CodeObjectRequired::kYes);
+ DCHECK(
+ !BuiltinAliasesOffHeapTrampolineRegister(isolate, code->builtin_index()));
+ DCHECK(!masm.has_frame());
+ {
+ FrameScope scope(&masm, StackFrame::NONE);
+ masm.JumpToInstructionStream(stream);
+ }
+
+ CodeDesc desc;
+ masm.GetCode(isolate, &desc);
+
+ // Hack in an empty reloc info to satisfy the GC.
+ DCHECK_EQ(0, desc.reloc_size);
+ Handle<ByteArray> reloc_info =
+ isolate->factory()->NewByteArray(desc.reloc_size, TENURED);
+ code->set_relocation_info(*reloc_info);
+
+ // Overwrites the original code.
+ CHECK_LE(desc.instr_size, code->instruction_size());
+ CHECK_IMPLIES(code->has_safepoint_info(),
+ desc.instr_size <= code->safepoint_table_offset());
+ code->CopyFrom(desc);
+
+ // TODO(jgruber): CopyFrom isn't intended to overwrite existing code, and
+ // doesn't update things like instruction_size. The result is a code object in
+ // which the first instructions are overwritten while the rest remain intact
+ // (but are never executed). That's fine for our current purposes, just
+ // manually zero the trailing part.
+
+ DCHECK_LE(desc.instr_size, code->instruction_size());
+ byte* trailing_instruction_start =
+ code->instruction_start() + desc.instr_size;
+ int instruction_size = code->instruction_size();
+ if (code->has_safepoint_info()) {
+ CHECK_LE(code->safepoint_table_offset(), code->instruction_size());
+ instruction_size = code->safepoint_table_offset();
+ CHECK_LE(desc.instr_size, instruction_size);
+ }
+ size_t trailing_instruction_size = instruction_size - desc.instr_size;
+ std::memset(trailing_instruction_start, 0, trailing_instruction_size);
+}
+
+void LogInstructionStream(Isolate* isolate, Code* code,
+ const InstructionStream* stream) {
+ if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
+ isolate->logger()->LogInstructionStream(code, stream);
+ }
+}
+
+void MoveBuiltinsOffHeap(Isolate* isolate) {
+ DCHECK(FLAG_stress_off_heap_code);
+ HandleScope scope(isolate);
+ Builtins* builtins = isolate->builtins();
+
+ // Lazy deserialization would defeat our off-heap stress test (we'd
+ // deserialize later without moving off-heap), so force eager
+ // deserialization.
+ Snapshot::EnsureAllBuiltinsAreDeserialized(isolate);
+
+ CodeSpaceMemoryModificationScope code_allocation(isolate->heap());
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ if (!Builtins::IsOffHeapSafe(i)) continue;
+ Handle<Code> code(builtins->builtin(i));
+ InstructionStream* stream = new InstructionStream(*code);
+ LogInstructionStream(isolate, *code, stream);
+ ChangeToOffHeapTrampoline(isolate, code, stream);
+ isolate->PushOffHeapCode(stream);
+ }
+}
+#endif // V8_EMBEDDED_BUILTINS
} // namespace
bool Isolate::Init(StartupDeserializer* des) {
@@ -2954,6 +3050,9 @@ bool Isolate::Init(StartupDeserializer* des) {
if (create_heap_objects) {
// Terminate the partial snapshot cache so we can iterate.
partial_snapshot_cache_.push_back(heap_.undefined_value());
+#ifdef V8_EMBEDDED_BUILTINS
+ builtins_constants_table_builder_ = new BuiltinsConstantsTableBuilder(this);
+#endif
}
InitializeThreadLocal();
@@ -2986,6 +3085,14 @@ bool Isolate::Init(StartupDeserializer* des) {
store_stub_cache_->Initialize();
setup_delegate_->SetupInterpreter(interpreter_);
+#ifdef V8_EMBEDDED_BUILTINS
+ if (create_heap_objects) {
+ builtins_constants_table_builder_->Finalize();
+ delete builtins_constants_table_builder_;
+ builtins_constants_table_builder_ = nullptr;
+ }
+#endif // V8_EMBEDDED_BUILTINS
+
heap_.NotifyDeserializationComplete();
}
delete setup_delegate_;
@@ -2993,6 +3100,15 @@ bool Isolate::Init(StartupDeserializer* des) {
if (FLAG_print_builtin_size) PrintBuiltinSizes(this);
+#ifdef V8_EMBEDDED_BUILTINS
+ if (FLAG_stress_off_heap_code && !serializer_enabled()) {
+ // Artificially move code off-heap to help find & verify related code
+ // paths. Lazy deserialization should be off to avoid confusion around
+ // replacing just the kDeserializeLazy code object.
+ MoveBuiltinsOffHeap(this);
+ }
+#endif
+
// Finish initialization of ThreadLocal after deserialization is done.
clear_pending_exception();
clear_pending_message();
@@ -3162,6 +3278,12 @@ void Isolate::DumpAndResetStats() {
}
}
+void Isolate::AbortConcurrentOptimization(BlockingBehavior behavior) {
+ if (concurrent_recompilation_enabled()) {
+ DisallowHeapAllocation no_recursive_gc;
+ optimizing_compile_dispatcher()->Flush(behavior);
+ }
+}
CompilationStatistics* Isolate::GetTurboStatistics() {
if (turbo_statistics() == nullptr)
@@ -3176,8 +3298,7 @@ CodeTracer* Isolate::GetCodeTracer() {
}
bool Isolate::use_optimizer() {
- return FLAG_opt && !serializer_enabled_ &&
- CpuFeatures::SupportsCrankshaft() &&
+ return FLAG_opt && !serializer_enabled_ && CpuFeatures::SupportsOptimizer() &&
!is_precise_count_code_coverage() && !is_block_count_code_coverage();
}
@@ -3368,6 +3489,32 @@ bool Isolate::IsIsConcatSpreadableLookupChainIntact(JSReceiver* receiver) {
return !receiver->HasProxyInPrototype(this);
}
+bool Isolate::IsPromiseHookProtectorIntact() {
+ PropertyCell* promise_hook_cell = heap()->promise_hook_protector();
+ bool is_promise_hook_protector_intact =
+ Smi::ToInt(promise_hook_cell->value()) == kProtectorValid;
+ DCHECK_IMPLIES(is_promise_hook_protector_intact,
+ !promise_hook_or_debug_is_active_);
+ return is_promise_hook_protector_intact;
+}
+
+bool Isolate::IsPromiseThenLookupChainIntact() {
+ PropertyCell* promise_then_cell = heap()->promise_then_protector();
+ bool is_promise_then_protector_intact =
+ Smi::ToInt(promise_then_cell->value()) == kProtectorValid;
+ return is_promise_then_protector_intact;
+}
+
+bool Isolate::IsPromiseThenLookupChainIntact(Handle<JSReceiver> receiver) {
+ DisallowHeapAllocation no_gc;
+ if (!receiver->IsJSPromise()) return false;
+ if (!IsInAnyContext(receiver->map()->prototype(),
+ Context::PROMISE_PROTOTYPE_INDEX)) {
+ return false;
+ }
+ return IsPromiseThenLookupChainIntact();
+}
+
void Isolate::UpdateNoElementsProtectorOnSetElement(Handle<JSObject> object) {
DisallowHeapAllocation no_gc;
if (!object->map()->is_prototype_map()) return;
@@ -3394,11 +3541,11 @@ void Isolate::InvalidateArrayConstructorProtector() {
DCHECK(!IsArrayConstructorIntact());
}
-void Isolate::InvalidateArraySpeciesProtector() {
+void Isolate::InvalidateSpeciesProtector() {
DCHECK(factory()->species_protector()->value()->IsSmi());
- DCHECK(IsArraySpeciesLookupChainIntact());
+ DCHECK(IsSpeciesLookupChainIntact());
factory()->species_protector()->set_value(Smi::FromInt(kProtectorInvalid));
- DCHECK(!IsArraySpeciesLookupChainIntact());
+ DCHECK(!IsSpeciesLookupChainIntact());
}
void Isolate::InvalidateStringLengthOverflowProtector() {
@@ -3427,6 +3574,24 @@ void Isolate::InvalidateArrayBufferNeuteringProtector() {
DCHECK(!IsArrayBufferNeuteringIntact());
}
+void Isolate::InvalidatePromiseHookProtector() {
+ DCHECK(factory()->promise_hook_protector()->value()->IsSmi());
+ DCHECK(IsPromiseHookProtectorIntact());
+ PropertyCell::SetValueWithInvalidation(
+ factory()->promise_hook_protector(),
+ handle(Smi::FromInt(kProtectorInvalid), this));
+ DCHECK(!IsPromiseHookProtectorIntact());
+}
+
+void Isolate::InvalidatePromiseThenProtector() {
+ DCHECK(factory()->promise_then_protector()->value()->IsSmi());
+ DCHECK(IsPromiseThenLookupChainIntact());
+ PropertyCell::SetValueWithInvalidation(
+ factory()->promise_then_protector(),
+ handle(Smi::FromInt(kProtectorInvalid), this));
+ DCHECK(!IsPromiseThenLookupChainIntact());
+}
+
bool Isolate::IsAnyInitialArrayPrototype(Handle<JSArray> array) {
DisallowHeapAllocation no_gc;
return IsInAnyContext(*array, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
@@ -3588,7 +3753,11 @@ void Isolate::FireCallCompletedCallback() {
}
void Isolate::DebugStateUpdated() {
- promise_hook_or_debug_is_active_ = promise_hook_ || debug()->is_active();
+ bool promise_hook_or_debug_is_active = promise_hook_ || debug()->is_active();
+ if (promise_hook_or_debug_is_active && IsPromiseHookProtectorIntact()) {
+ InvalidatePromiseHookProtector();
+ }
+ promise_hook_or_debug_is_active_ = promise_hook_or_debug_is_active;
}
namespace {
@@ -3698,83 +3867,16 @@ void Isolate::ReportPromiseReject(Handle<JSPromise> promise,
v8::Utils::StackTraceToLocal(stack_trace)));
}
-void Isolate::PromiseReactionJob(Handle<PromiseReactionJobInfo> info,
- MaybeHandle<Object>* result,
- MaybeHandle<Object>* maybe_exception) {
- Handle<Object> value(info->value(), this);
- Handle<Object> tasks(info->tasks(), this);
- Handle<JSFunction> promise_handle_fn = promise_handle();
- Handle<Object> undefined = factory()->undefined_value();
- Handle<Object> deferred_promise(info->deferred_promise(), this);
-
- if (deferred_promise->IsFixedArray()) {
- DCHECK(tasks->IsFixedArray());
- Handle<FixedArray> deferred_promise_arr =
- Handle<FixedArray>::cast(deferred_promise);
- Handle<FixedArray> deferred_on_resolve_arr(
- FixedArray::cast(info->deferred_on_resolve()), this);
- Handle<FixedArray> deferred_on_reject_arr(
- FixedArray::cast(info->deferred_on_reject()), this);
- Handle<FixedArray> tasks_arr = Handle<FixedArray>::cast(tasks);
- for (int i = 0; i < deferred_promise_arr->length(); i++) {
- Handle<Object> argv[] = {value, handle(tasks_arr->get(i), this),
- handle(deferred_promise_arr->get(i), this),
- handle(deferred_on_resolve_arr->get(i), this),
- handle(deferred_on_reject_arr->get(i), this)};
- *result = Execution::TryCall(
- this, promise_handle_fn, undefined, arraysize(argv), argv,
- Execution::MessageHandling::kReport, maybe_exception);
- // If execution is terminating, just bail out.
- if (result->is_null() && maybe_exception->is_null()) {
- return;
- }
- }
- } else {
- Handle<Object> argv[] = {value, tasks, deferred_promise,
- handle(info->deferred_on_resolve(), this),
- handle(info->deferred_on_reject(), this)};
- *result = Execution::TryCall(
- this, promise_handle_fn, undefined, arraysize(argv), argv,
- Execution::MessageHandling::kReport, maybe_exception);
- }
-}
-
-void Isolate::PromiseResolveThenableJob(
- Handle<PromiseResolveThenableJobInfo> info, MaybeHandle<Object>* result,
- MaybeHandle<Object>* maybe_exception) {
- Handle<JSReceiver> thenable(info->thenable(), this);
- Handle<JSFunction> resolve(info->resolve(), this);
- Handle<JSFunction> reject(info->reject(), this);
- Handle<JSReceiver> then(info->then(), this);
- Handle<Object> argv[] = {resolve, reject};
- *result =
- Execution::TryCall(this, then, thenable, arraysize(argv), argv,
- Execution::MessageHandling::kReport, maybe_exception);
-
- Handle<Object> reason;
- if (maybe_exception->ToHandle(&reason)) {
- DCHECK(result->is_null());
- Handle<Object> reason_arg[] = {reason};
- *result = Execution::TryCall(
- this, reject, factory()->undefined_value(), arraysize(reason_arg),
- reason_arg, Execution::MessageHandling::kReport, maybe_exception);
- }
-}
-
-void Isolate::EnqueueMicrotask(Handle<Object> microtask) {
- DCHECK(microtask->IsJSFunction() || microtask->IsCallHandlerInfo() ||
- microtask->IsPromiseResolveThenableJobInfo() ||
- microtask->IsPromiseReactionJobInfo());
+void Isolate::EnqueueMicrotask(Handle<Microtask> microtask) {
Handle<FixedArray> queue(heap()->microtask_queue(), this);
int num_tasks = pending_microtask_count();
- DCHECK(num_tasks <= queue->length());
- if (num_tasks == 0) {
- queue = factory()->NewFixedArray(8);
- heap()->set_microtask_queue(*queue);
- } else if (num_tasks == queue->length()) {
- queue = factory()->CopyFixedArrayAndGrow(queue, num_tasks);
+ DCHECK_LE(num_tasks, queue->length());
+ if (num_tasks == queue->length()) {
+ queue = factory()->CopyFixedArrayAndGrow(queue, std::max(num_tasks, 8));
heap()->set_microtask_queue(*queue);
}
+ DCHECK_LE(8, queue->length());
+ DCHECK_LT(num_tasks, queue->length());
DCHECK(queue->get(num_tasks)->IsUndefined(this));
queue->set(num_tasks, *microtask);
set_pending_microtask_count(num_tasks + 1);
@@ -3785,100 +3887,25 @@ void Isolate::RunMicrotasks() {
// Increase call depth to prevent recursive callbacks.
v8::Isolate::SuppressMicrotaskExecutionScope suppress(
reinterpret_cast<v8::Isolate*>(this));
- is_running_microtasks_ = true;
- RunMicrotasksInternal();
- is_running_microtasks_ = false;
- FireMicrotasksCompletedCallback();
-}
-
+ if (pending_microtask_count()) {
+ is_running_microtasks_ = true;
+ TRACE_EVENT0("v8.execute", "RunMicrotasks");
+ TRACE_EVENT_CALL_STATS_SCOPED(this, "v8", "V8.RunMicrotasks");
-void Isolate::RunMicrotasksInternal() {
- if (!pending_microtask_count()) return;
- TRACE_EVENT0("v8.execute", "RunMicrotasks");
- TRACE_EVENT_CALL_STATS_SCOPED(this, "v8", "V8.RunMicrotasks");
-
- do {
- HandleScope handle_scope(this);
- set_microtask_queue_bailout_index(-1);
- set_microtask_queue_bailout_count(-1);
+ HandleScope scope(this);
MaybeHandle<Object> maybe_exception;
MaybeHandle<Object> maybe_result = Execution::RunMicrotasks(
this, Execution::MessageHandling::kReport, &maybe_exception);
+ // If execution is terminating, just bail out.
if (maybe_result.is_null() && maybe_exception.is_null()) {
heap()->set_microtask_queue(heap()->empty_fixed_array());
set_pending_microtask_count(0);
- return;
}
-
- Handle<Object> result = maybe_result.ToHandleChecked();
- if (result->IsUndefined(this)) return;
-
- Handle<FixedArray> queue = Handle<FixedArray>::cast(result);
- int num_tasks = microtask_queue_bailout_count();
- DCHECK_GE(microtask_queue_bailout_index(), 0);
-
- Isolate* isolate = this;
- FOR_WITH_HANDLE_SCOPE(
- isolate, int, i = microtask_queue_bailout_index(), i, i < num_tasks,
- i++, {
- Handle<Object> microtask(queue->get(i), this);
-
- if (microtask->IsCallHandlerInfo()) {
- Handle<CallHandlerInfo> callback_info =
- Handle<CallHandlerInfo>::cast(microtask);
- v8::MicrotaskCallback callback =
- v8::ToCData<v8::MicrotaskCallback>(callback_info->callback());
- void* data = v8::ToCData<void*>(callback_info->data());
- callback(data);
- } else {
- SaveContext save(this);
- Context* context;
- if (microtask->IsJSFunction()) {
- context = Handle<JSFunction>::cast(microtask)->context();
- } else if (microtask->IsPromiseResolveThenableJobInfo()) {
- context = Handle<PromiseResolveThenableJobInfo>::cast(microtask)
- ->context();
- } else {
- context =
- Handle<PromiseReactionJobInfo>::cast(microtask)->context();
- }
-
- set_context(context->native_context());
- handle_scope_implementer_->EnterMicrotaskContext(
- Handle<Context>(context, this));
-
- MaybeHandle<Object> result;
- MaybeHandle<Object> maybe_exception;
-
- if (microtask->IsJSFunction()) {
- Handle<JSFunction> microtask_function =
- Handle<JSFunction>::cast(microtask);
- result = Execution::TryCall(
- this, microtask_function, factory()->undefined_value(), 0,
- nullptr, Execution::MessageHandling::kReport,
- &maybe_exception);
- } else if (microtask->IsPromiseResolveThenableJobInfo()) {
- PromiseResolveThenableJob(
- Handle<PromiseResolveThenableJobInfo>::cast(microtask),
- &result, &maybe_exception);
- } else {
- PromiseReactionJob(
- Handle<PromiseReactionJobInfo>::cast(microtask), &result,
- &maybe_exception);
- }
-
- handle_scope_implementer_->LeaveMicrotaskContext();
-
- // If execution is terminating, just bail out.
- if (result.is_null() && maybe_exception.is_null()) {
- // Clear out any remaining callbacks in the queue.
- heap()->set_microtask_queue(heap()->empty_fixed_array());
- set_pending_microtask_count(0);
- return;
- }
- }
- });
- } while (pending_microtask_count() > 0);
+ CHECK_EQ(0, pending_microtask_count());
+ CHECK_EQ(0, heap()->microtask_queue()->length());
+ is_running_microtasks_ = false;
+ }
+ FireMicrotasksCompletedCallback();
}
void Isolate::SetUseCounterCallback(v8::Isolate::UseCounterCallback callback) {
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 8eca55ffd6..5538992af1 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -10,23 +10,23 @@
#include <queue>
#include <vector>
-#include "include/v8-debug.h"
+#include "include/v8.h"
#include "src/allocation.h"
#include "src/base/atomicops.h"
+#include "src/base/macros.h"
#include "src/builtins/builtins.h"
#include "src/contexts.h"
#include "src/date.h"
#include "src/debug/debug-interface.h"
#include "src/execution.h"
#include "src/futex-emulation.h"
-#include "src/global-handles.h"
+#include "src/globals.h"
#include "src/handles.h"
#include "src/heap/heap.h"
#include "src/messages.h"
#include "src/objects/code.h"
-#include "src/regexp/regexp-stack.h"
#include "src/runtime/runtime.h"
-#include "src/zone/zone.h"
+#include "src/unicode.h"
namespace v8 {
@@ -49,6 +49,7 @@ class AddressToIndexHashMap;
class AstStringConstants;
class BasicBlockProfiler;
class Bootstrapper;
+class BuiltinsConstantsTableBuilder;
class CallInterfaceDescriptorData;
class CancelableTaskManager;
class CodeEventDispatcher;
@@ -67,6 +68,7 @@ class Debug;
class DeoptimizerData;
class DescriptorLookupCache;
class EmptyStatement;
+class EternalHandles;
class ExternalCallbackScope;
class ExternalReferenceTable;
class Factory;
@@ -75,8 +77,10 @@ class HeapObjectToIndexHashMap;
class HeapProfiler;
class InlineRuntimeFunctionsTable;
class InnerPointerToCodeCache;
+class InstructionStream;
class Logger;
class MaterializedObjectStore;
+class Microtask;
class OptimizingCompileDispatcher;
class PromiseOnStack;
class Redirection;
@@ -372,9 +376,6 @@ class ThreadLocalTop BASE_EMBEDDED {
// Call back function to report unsafe JS accesses.
v8::FailedAccessCheckCallback failed_access_check_callback_;
- int microtask_queue_bailout_index_;
- int microtask_queue_bailout_count_;
-
private:
void InitializeInternal();
@@ -382,17 +383,6 @@ class ThreadLocalTop BASE_EMBEDDED {
};
-#if USE_SIMULATOR
-
-#define ISOLATE_INIT_SIMULATOR_LIST(V) \
- V(base::CustomMatcherHashMap*, simulator_i_cache, nullptr)
-#else
-
-#define ISOLATE_INIT_SIMULATOR_LIST(V)
-
-#endif
-
-
#ifdef DEBUG
#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V) \
@@ -453,8 +443,7 @@ typedef std::vector<HeapObject*> DebugObjectCache;
V(debug::Coverage::Mode, code_coverage_mode, debug::Coverage::kBestEffort) \
V(debug::TypeProfile::Mode, type_profile_mode, debug::TypeProfile::kNone) \
V(int, last_stack_frame_info_id, 0) \
- V(int, last_console_context_id, 0) \
- ISOLATE_INIT_SIMULATOR_LIST(V)
+ V(int, last_console_context_id, 0)
#define THREAD_LOCAL_TOP_ACCESSOR(type, name) \
inline void set_##name(type v) { thread_local_top_.name##_ = v; } \
@@ -675,18 +664,6 @@ class Isolate {
return &thread_local_top_.js_entry_sp_;
}
- THREAD_LOCAL_TOP_ACCESSOR(int, microtask_queue_bailout_index)
- Address microtask_queue_bailout_index_address() {
- return reinterpret_cast<Address>(
- &thread_local_top_.microtask_queue_bailout_index_);
- }
-
- THREAD_LOCAL_TOP_ACCESSOR(int, microtask_queue_bailout_count)
- Address microtask_queue_bailout_count_address() {
- return reinterpret_cast<Address>(
- &thread_local_top_.microtask_queue_bailout_count_);
- }
-
// Returns the global object of the current context. It could be
// a builtin object, or a JS global object.
inline Handle<JSGlobalObject> global_object();
@@ -743,16 +720,10 @@ class Isolate {
Handle<String> StackTraceString();
// Stores a stack trace in a stack-allocated temporary buffer which will
// end up in the minidump for debugging purposes.
- NO_INLINE(void PushStackTraceAndDie(unsigned int magic1, void* ptr1,
- void* ptr2, unsigned int magic2));
- NO_INLINE(void PushStackTraceAndDie(unsigned int magic1, void* ptr1,
- void* ptr2, void* ptr3, void* ptr4,
- void* ptr5, void* ptr6, void* ptr7,
- void* ptr8, unsigned int magic2));
- NO_INLINE(void PushCodeObjectsAndDie(unsigned int magic, void* ptr1,
- void* ptr2, void* ptr3, void* ptr4,
- void* ptr5, void* ptr6, void* ptr7,
- void* ptr8, unsigned int magic2));
+ NO_INLINE(void PushStackTraceAndDie(void* ptr1 = nullptr,
+ void* ptr2 = nullptr,
+ void* ptr3 = nullptr,
+ void* ptr4 = nullptr));
Handle<FixedArray> CaptureCurrentStackTrace(
int frame_limit, StackTrace::StackTraceOptions options);
Handle<Object> CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
@@ -1104,7 +1075,7 @@ class Isolate {
bool IsNoElementsProtectorIntact(Context* context);
bool IsNoElementsProtectorIntact();
- inline bool IsArraySpeciesLookupChainIntact();
+ inline bool IsSpeciesLookupChainIntact();
bool IsIsConcatSpreadableLookupChainIntact();
bool IsIsConcatSpreadableLookupChainIntact(JSReceiver* receiver);
inline bool IsStringLengthOverflowIntact();
@@ -1116,6 +1087,15 @@ class Isolate {
// Make sure we do check for neutered array buffers.
inline bool IsArrayBufferNeuteringIntact();
+ // Disable promise optimizations if promise (debug) hooks have ever been
+ // active.
+ bool IsPromiseHookProtectorIntact();
+
+ // Make sure a lookup of "then" on any JSPromise whose [[Prototype]] is the
+ // initial %PromisePrototype% yields the initial method.
+ bool IsPromiseThenLookupChainIntact();
+ bool IsPromiseThenLookupChainIntact(Handle<JSReceiver> receiver);
+
// On intent to set an element in object, make sure that appropriate
// notifications occur if the set is on the elements of the array or
// object prototype. Also ensure that changes to prototype chain between
@@ -1131,11 +1111,13 @@ class Isolate {
UpdateNoElementsProtectorOnSetElement(object);
}
void InvalidateArrayConstructorProtector();
- void InvalidateArraySpeciesProtector();
+ void InvalidateSpeciesProtector();
void InvalidateIsConcatSpreadableProtector();
void InvalidateStringLengthOverflowProtector();
void InvalidateArrayIteratorProtector();
void InvalidateArrayBufferNeuteringProtector();
+ V8_EXPORT_PRIVATE void InvalidatePromiseHookProtector();
+ void InvalidatePromiseThenProtector();
// Returns true if array is the initial array prototype in any native context.
bool IsAnyInitialArrayPrototype(Handle<JSArray> array);
@@ -1161,6 +1143,9 @@ class Isolate {
OptimizingCompileDispatcher* optimizing_compile_dispatcher() {
return optimizing_compile_dispatcher_;
}
+ // Flushes all pending concurrent optimization jobs from the optimizing
+ // compile dispatcher's queue.
+ void AbortConcurrentOptimization(BlockingBehavior blocking_behavior);
int id() const { return static_cast<int>(id_); }
@@ -1176,8 +1161,8 @@ class Isolate {
void* stress_deopt_count_address() { return &stress_deopt_count_; }
- bool force_slow_path() { return force_slow_path_; }
-
+ void set_force_slow_path(bool v) { force_slow_path_ = v; }
+ bool force_slow_path() const { return force_slow_path_; }
bool* force_slow_path_address() { return &force_slow_path_; }
V8_EXPORT_PRIVATE base::RandomNumberGenerator* random_number_generator();
@@ -1215,14 +1200,7 @@ class Isolate {
void ReportPromiseReject(Handle<JSPromise> promise, Handle<Object> value,
v8::PromiseRejectEvent event);
- void PromiseReactionJob(Handle<PromiseReactionJobInfo> info,
- MaybeHandle<Object>* result,
- MaybeHandle<Object>* maybe_exception);
- void PromiseResolveThenableJob(Handle<PromiseResolveThenableJobInfo> info,
- MaybeHandle<Object>* result,
- MaybeHandle<Object>* maybe_exception);
-
- void EnqueueMicrotask(Handle<Object> microtask);
+ void EnqueueMicrotask(Handle<Microtask> microtask);
void RunMicrotasks();
bool IsRunningMicrotasks() const { return is_running_microtasks_; }
@@ -1266,6 +1244,16 @@ class Isolate {
return &partial_snapshot_cache_;
}
+ void PushOffHeapCode(InstructionStream* stream) {
+ off_heap_code_.emplace_back(stream);
+ }
+
+#ifdef V8_EMBEDDED_BUILTINS
+ BuiltinsConstantsTableBuilder* builtins_constants_table_builder() const {
+ return builtins_constants_table_builder_;
+ }
+#endif
+
void set_array_buffer_allocator(v8::ArrayBuffer::Allocator* allocator) {
array_buffer_allocator_ = allocator;
}
@@ -1317,10 +1305,6 @@ class Isolate {
PRINTF_FORMAT(2, 3) void PrintWithTimestamp(const char* format, ...);
-#ifdef USE_SIMULATOR
- base::Mutex* simulator_i_cache_mutex() { return &simulator_i_cache_mutex_; }
-#endif
-
void set_allow_atomics_wait(bool set) { allow_atomics_wait_ = set; }
bool allow_atomics_wait() { return allow_atomics_wait_; }
@@ -1476,8 +1460,6 @@ class Isolate {
// then return true.
bool PropagatePendingExceptionToExternalTryCatch();
- void RunMicrotasksInternal();
-
const char* RAILModeName(RAILMode rail_mode) const {
switch (rail_mode) {
case PERFORMANCE_RESPONSE:
@@ -1639,6 +1621,18 @@ class Isolate {
std::vector<Object*> partial_snapshot_cache_;
+ // Stores off-heap instruction streams. Only used if --stress-off-heap-code
+ // is enabled.
+ // TODO(jgruber,v8:6666): Remove once isolate-independent builtins are
+ // implemented. Also remove friend class below.
+ std::vector<InstructionStream*> off_heap_code_;
+
+#ifdef V8_EMBEDDED_BUILTINS
+ // Used during builtins compilation to build the builtins constants table,
+ // which is stored on the root list prior to serialization.
+ BuiltinsConstantsTableBuilder* builtins_constants_table_builder_ = nullptr;
+#endif
+
v8::ArrayBuffer::Allocator* array_buffer_allocator_;
FutexWaitListNode futex_wait_list_node_;
@@ -1650,10 +1644,6 @@ class Isolate {
v8::Isolate::AbortOnUncaughtExceptionCallback
abort_on_uncaught_exception_callback_;
-#ifdef USE_SIMULATOR
- base::Mutex simulator_i_cache_mutex_;
-#endif
-
bool allow_atomics_wait_;
ManagedObjectFinalizer managed_object_finalizers_list_;
@@ -1671,17 +1661,18 @@ class Isolate {
friend class ExecutionAccess;
friend class HandleScopeImplementer;
friend class heap::HeapTester;
+ friend class InstructionStream;
friend class OptimizingCompileDispatcher;
- friend class SweeperThread;
- friend class ThreadManager;
friend class Simulator;
friend class StackGuard;
+ friend class SweeperThread;
friend class TestIsolate;
friend class ThreadId;
+ friend class ThreadManager;
friend class v8::Isolate;
friend class v8::Locker;
- friend class v8::Unlocker;
friend class v8::SnapshotCreator;
+ friend class v8::Unlocker;
friend v8::StartupData v8::V8::CreateSnapshotDataBlob(const char*);
friend v8::StartupData v8::V8::WarmUpSnapshotDataBlob(v8::StartupData,
const char*);
@@ -1897,6 +1888,29 @@ class CodeTracer final : public Malloced {
int scope_depth_;
};
+class StackTraceFailureMessage {
+ public:
+ explicit StackTraceFailureMessage(Isolate* isolate, void* ptr1 = nullptr,
+ void* ptr2 = nullptr, void* ptr3 = nullptr,
+ void* ptr4 = nullptr);
+
+ V8_NOINLINE void Print() volatile;
+
+ static const uintptr_t kStartMarker = 0xdecade30;
+ static const uintptr_t kEndMarker = 0xdecade31;
+ static const int kStacktraceBufferSize = 32 * KB;
+
+ uintptr_t start_marker_ = kStartMarker;
+ void* isolate_;
+ void* ptr1_;
+ void* ptr2_;
+ void* ptr3_;
+ void* ptr4_;
+ void* code_objects_[4];
+ char js_stack_trace_[kStacktraceBufferSize];
+ uintptr_t end_marker_ = kEndMarker;
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/js/OWNERS b/deps/v8/src/js/OWNERS
index 0108c712e3..de2152c056 100644
--- a/deps/v8/src/js/OWNERS
+++ b/deps/v8/src/js/OWNERS
@@ -7,7 +7,6 @@ ishell@chromium.org
jgruber@chromium.org
jkummerow@chromium.org
littledan@chromium.org
-rossberg@chromium.org
verwaest@chromium.org
yangguo@chromium.org
diff --git a/deps/v8/src/js/array.js b/deps/v8/src/js/array.js
index 7605fc1a7d..46096a0ba5 100644
--- a/deps/v8/src/js/array.js
+++ b/deps/v8/src/js/array.js
@@ -1153,81 +1153,6 @@ DEFINE_METHOD_LEN(
1 /* Set function length */
);
-
-// ES6, draft 10-14-14, section 22.1.2.1
-DEFINE_METHOD_LEN(
- GlobalArray,
- 'from'(arrayLike, mapfn, receiver) {
- var items = TO_OBJECT(arrayLike);
- var mapping = !IS_UNDEFINED(mapfn);
-
- if (mapping) {
- if (!IS_CALLABLE(mapfn)) {
- throw %make_type_error(kCalledNonCallable, mapfn);
- }
- }
-
- var iterable = GetMethod(items, iteratorSymbol);
- var k;
- var result;
- var mappedValue;
- var nextValue;
-
- if (!IS_UNDEFINED(iterable)) {
- result = %IsConstructor(this) ? new this() : [];
- k = 0;
-
- for (nextValue of
- { [iteratorSymbol]() { return GetIterator(items, iterable) } }) {
- if (mapping) {
- mappedValue = %_Call(mapfn, receiver, nextValue, k);
- } else {
- mappedValue = nextValue;
- }
- %CreateDataProperty(result, k, mappedValue);
- k++;
- }
- result.length = k;
- return result;
- } else {
- var len = TO_LENGTH(items.length);
- result = %IsConstructor(this) ? new this(len) : new GlobalArray(len);
-
- for (k = 0; k < len; ++k) {
- nextValue = items[k];
- if (mapping) {
- mappedValue = %_Call(mapfn, receiver, nextValue, k);
- } else {
- mappedValue = nextValue;
- }
- %CreateDataProperty(result, k, mappedValue);
- }
-
- result.length = k;
- return result;
- }
- },
- 1 /* Set function length. */
-);
-
-// ES6, draft 05-22-14, section 22.1.2.3
-DEFINE_METHOD(
- GlobalArray,
- of(...args) {
- var length = args.length;
- var constructor = this;
- // TODO: Implement IsConstructor (ES6 section 7.2.5)
- var array = %IsConstructor(constructor) ? new constructor(length) : [];
- for (var i = 0; i < length; i++) {
- %CreateDataProperty(array, i, args[i]);
- }
- array.length = length;
- return array;
- }
-);
-
-// -------------------------------------------------------------------
-
// Set up unscopable properties on the Array.prototype object.
var unscopables = {
__proto__: null,
diff --git a/deps/v8/src/js/prologue.js b/deps/v8/src/js/prologue.js
index 32f826691d..0d6b670367 100644
--- a/deps/v8/src/js/prologue.js
+++ b/deps/v8/src/js/prologue.js
@@ -177,13 +177,7 @@ extrasUtils.createPrivateSymbol = function createPrivateSymbol(name) {
//
// Technically they could all be derived from combinations of
// Function.prototype.{bind,call,apply} but that introduces lots of layers of
-// indirection and slowness given how un-optimized bind is.
-
-extrasUtils.simpleBind = function simpleBind(func, thisArg) {
- return function(...args) {
- return %reflect_apply(func, thisArg, args);
- };
-};
+// indirection.
extrasUtils.uncurryThis = function uncurryThis(func) {
return function(thisArg, ...args) {
@@ -191,11 +185,6 @@ extrasUtils.uncurryThis = function uncurryThis(func) {
};
};
-// We pass true to trigger the debugger's on exception handler.
-extrasUtils.rejectPromise = function rejectPromise(promise, reason) {
- %promise_internal_reject(promise, reason, true);
-}
-
extrasUtils.markPromiseAsHandled = function markPromiseAsHandled(promise) {
%PromiseMarkAsHandled(promise);
};
diff --git a/deps/v8/src/js/typedarray.js b/deps/v8/src/js/typedarray.js
index 7fa638fa89..18998cf9be 100644
--- a/deps/v8/src/js/typedarray.js
+++ b/deps/v8/src/js/typedarray.js
@@ -15,19 +15,11 @@
var ArrayToString = utils.ImportNow("ArrayToString");
var GetIterator;
var GetMethod;
-var GlobalArray = global.Array;
-var GlobalArrayBuffer = global.ArrayBuffer;
-var GlobalArrayBufferPrototype = GlobalArrayBuffer.prototype;
-var GlobalObject = global.Object;
var InnerArrayJoin;
var InnerArraySort;
var InnerArrayToLocaleString;
var InternalArray = utils.InternalArray;
-var MathMax = global.Math.max;
-var MathMin = global.Math.min;
var iteratorSymbol = utils.ImportNow("iterator_symbol");
-var speciesSymbol = utils.ImportNow("species_symbol");
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
macro TYPED_ARRAYS(FUNCTION)
FUNCTION(Uint8Array, 1)
@@ -39,6 +31,8 @@ FUNCTION(Int32Array, 4)
FUNCTION(Float32Array, 4)
FUNCTION(Float64Array, 8)
FUNCTION(Uint8ClampedArray, 1)
+FUNCTION(BigUint64Array, 8)
+FUNCTION(BigInt64Array, 8)
endmacro
macro DECLARE_GLOBALS(NAME, SIZE)
@@ -47,14 +41,6 @@ endmacro
TYPED_ARRAYS(DECLARE_GLOBALS)
-macro IS_ARRAYBUFFER(arg)
-(%_ClassOf(arg) === 'ArrayBuffer')
-endmacro
-
-macro IS_SHAREDARRAYBUFFER(arg)
-(%_ClassOf(arg) === 'SharedArrayBuffer')
-endmacro
-
macro IS_TYPEDARRAY(arg)
(%_IsTypedArray(arg))
endmacro
@@ -69,25 +55,6 @@ utils.Import(function(from) {
InnerArrayToLocaleString = from.InnerArrayToLocaleString;
});
-// ES2015 7.3.20
-function SpeciesConstructor(object, defaultConstructor) {
- var constructor = object.constructor;
- if (IS_UNDEFINED(constructor)) {
- return defaultConstructor;
- }
- if (!IS_RECEIVER(constructor)) {
- throw %make_type_error(kConstructorNotReceiver);
- }
- var species = constructor[speciesSymbol];
- if (IS_NULL_OR_UNDEFINED(species)) {
- return defaultConstructor;
- }
- if (%IsConstructor(species)) {
- return species;
- }
- throw %make_type_error(kSpeciesNotConstructor);
-}
-
// --------------- Typed Arrays ---------------------
// ES6 section 22.2.3.5.1 ValidateTypedArray ( O )
@@ -98,20 +65,6 @@ function ValidateTypedArray(array, methodName) {
throw %make_type_error(kDetachedOperation, methodName);
}
-function TypedArrayDefaultConstructor(typedArray) {
- switch (%_ClassOf(typedArray)) {
-macro TYPED_ARRAY_CONSTRUCTOR_CASE(NAME, ELEMENT_SIZE)
- case "NAME":
- return GlobalNAME;
-endmacro
-TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTOR_CASE)
- }
- // The TypeError should not be generated since all callers should
- // have already called ValidateTypedArray.
- throw %make_type_error(kIncompatibleMethodReceiver,
- "TypedArrayDefaultConstructor", this);
-}
-
function TypedArrayCreate(constructor, arg0, arg1, arg2) {
if (IS_UNDEFINED(arg1)) {
var newTypedArray = new constructor(arg0);
@@ -125,174 +78,6 @@ function TypedArrayCreate(constructor, arg0, arg1, arg2) {
return newTypedArray;
}
-function TypedArraySpeciesCreate(exemplar, arg0, arg1, arg2) {
- var defaultConstructor = TypedArrayDefaultConstructor(exemplar);
- var constructor = SpeciesConstructor(exemplar, defaultConstructor);
- return TypedArrayCreate(constructor, arg0, arg1, arg2);
-}
-
-macro TYPED_ARRAY_CONSTRUCTOR(NAME, ELEMENT_SIZE)
-function NAMEConstructByIterable(obj, iterable, iteratorFn) {
- if (%IterableToListCanBeElided(iterable)) {
- // This .length access is unobservable, because it being observable would
- // mean that iteration has side effects, and we wouldn't reach this path.
- %typed_array_construct_by_array_like(
- obj, iterable, iterable.length, ELEMENT_SIZE);
- } else {
- var list = new InternalArray();
- // Reading the Symbol.iterator property of iterable twice would be
- // observable with getters, so instead, we call the function which
- // was already looked up, and wrap it in another iterable. The
- // __proto__ of the new iterable is set to null to avoid any chance
- // of modifications to Object.prototype being observable here.
- var iterator = %_Call(iteratorFn, iterable);
- var newIterable = {
- __proto__: null
- };
- // TODO(littledan): Computed properties don't work yet in nosnap.
- // Rephrase when they do.
- newIterable[iteratorSymbol] = function() { return iterator; }
- for (var value of newIterable) {
- list.push(value);
- }
- %typed_array_construct_by_array_like(obj, list, list.length, ELEMENT_SIZE);
- }
-}
-
-// ES#sec-typedarray-typedarray TypedArray ( typedArray )
-function NAMEConstructByTypedArray(obj, typedArray) {
- // TODO(littledan): Throw on detached typedArray
- var srcData = %TypedArrayGetBuffer(typedArray);
- var length = %_TypedArrayGetLength(typedArray);
- var byteLength = %_ArrayBufferViewGetByteLength(typedArray);
- var newByteLength = length * ELEMENT_SIZE;
- %typed_array_construct_by_array_like(obj, typedArray, length, ELEMENT_SIZE);
- // The spec requires that constructing a typed array using a SAB-backed typed
- // array use the ArrayBuffer constructor, not the species constructor. See
- // https://tc39.github.io/ecma262/#sec-typedarray-typedarray.
- var bufferConstructor = IS_SHAREDARRAYBUFFER(srcData)
- ? GlobalArrayBuffer
- : SpeciesConstructor(srcData, GlobalArrayBuffer);
- var prototype = bufferConstructor.prototype;
- // TODO(littledan): Use the right prototype based on bufferConstructor's realm
- if (IS_RECEIVER(prototype) && prototype !== GlobalArrayBufferPrototype) {
- %InternalSetPrototype(%TypedArrayGetBuffer(obj), prototype);
- }
-}
-
-function NAMEConstructor(arg1, arg2, arg3) {
- if (!IS_UNDEFINED(new.target)) {
- if (IS_ARRAYBUFFER(arg1) || IS_SHAREDARRAYBUFFER(arg1)) {
- %typed_array_construct_by_array_buffer(
- this, arg1, arg2, arg3, ELEMENT_SIZE);
- } else if (IS_TYPEDARRAY(arg1)) {
- NAMEConstructByTypedArray(this, arg1);
- } else if (IS_RECEIVER(arg1)) {
- var iteratorFn = arg1[iteratorSymbol];
- if (IS_UNDEFINED(iteratorFn)) {
- %typed_array_construct_by_array_like(
- this, arg1, arg1.length, ELEMENT_SIZE);
- } else {
- NAMEConstructByIterable(this, arg1, iteratorFn);
- }
- } else {
- %typed_array_construct_by_length(this, arg1, ELEMENT_SIZE);
- }
- } else {
- throw %make_type_error(kConstructorNotFunction, "NAME")
- }
-}
-
-function NAMESubArray(begin, end) {
- var beginInt = TO_INTEGER(begin);
- if (!IS_UNDEFINED(end)) {
- var endInt = TO_INTEGER(end);
- var srcLength = %_TypedArrayGetLength(this);
- } else {
- var srcLength = %_TypedArrayGetLength(this);
- var endInt = srcLength;
- }
-
- if (beginInt < 0) {
- beginInt = MathMax(0, srcLength + beginInt);
- } else {
- beginInt = MathMin(beginInt, srcLength);
- }
-
- if (endInt < 0) {
- endInt = MathMax(0, srcLength + endInt);
- } else {
- endInt = MathMin(endInt, srcLength);
- }
-
- if (endInt < beginInt) {
- endInt = beginInt;
- }
-
- var newLength = endInt - beginInt;
- var beginByteOffset =
- %_ArrayBufferViewGetByteOffset(this) + beginInt * ELEMENT_SIZE;
- return TypedArraySpeciesCreate(this, %TypedArrayGetBuffer(this),
- beginByteOffset, newLength);
-}
-endmacro
-
-TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTOR)
-
-DEFINE_METHOD(
- GlobalTypedArray.prototype,
- subarray(begin, end) {
- switch (%_ClassOf(this)) {
-macro TYPED_ARRAY_SUBARRAY_CASE(NAME, ELEMENT_SIZE)
- case "NAME":
- return %_Call(NAMESubArray, this, begin, end);
-endmacro
-TYPED_ARRAYS(TYPED_ARRAY_SUBARRAY_CASE)
- }
- throw %make_type_error(kIncompatibleMethodReceiver,
- "get %TypedArray%.prototype.subarray", this);
- }
-);
-
-
-// The following functions cannot be made efficient on sparse arrays while
-// preserving the semantics, since the calls to the receiver function can add
-// or delete elements from the array.
-function InnerTypedArrayFilter(f, receiver, array, length, result) {
- var result_length = 0;
- for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- if (%_Call(f, receiver, element, i, array)) {
- %CreateDataProperty(result, result_length, element);
- result_length++;
- }
- }
- }
- return result;
-}
-
-
-// ES6 draft 07-15-13, section 22.2.3.9
-DEFINE_METHOD_LEN(
- GlobalTypedArray.prototype,
- filter(f, thisArg) {
- ValidateTypedArray(this, "%TypeArray%.prototype.filter");
-
- var length = %_TypedArrayGetLength(this);
- if (!IS_CALLABLE(f)) throw %make_type_error(kCalledNonCallable, f);
- var result = new InternalArray();
- InnerTypedArrayFilter(f, thisArg, this, length, result);
- var captured = result.length;
- var output = TypedArraySpeciesCreate(this, captured);
- for (var i = 0; i < captured; i++) {
- output[i] = result[i];
- }
- return output;
- },
- 1 /* Set function length. */
-);
-
// ES6 draft 05-18-15, section 22.2.3.25
DEFINE_METHOD(
GlobalTypedArray.prototype,
@@ -339,71 +124,6 @@ DEFINE_METHOD(
}
);
-
-// ES6 draft 08-24-14, section 22.2.2.2
-DEFINE_METHOD(
- GlobalTypedArray,
- of() {
- var length = arguments.length;
- var array = TypedArrayCreate(this, length);
- for (var i = 0; i < length; i++) {
- array[i] = arguments[i];
- }
- return array;
- }
-);
-
-
-// ES#sec-iterabletoarraylike Runtime Semantics: IterableToArrayLike( items )
-function IterableToArrayLike(items) {
- var iterable = GetMethod(items, iteratorSymbol);
- if (!IS_UNDEFINED(iterable)) {
- var internal_array = new InternalArray();
- var i = 0;
- for (var value of
- { [iteratorSymbol]() { return GetIterator(items, iterable) } }) {
- internal_array[i] = value;
- i++;
- }
- var array = [];
- %MoveArrayContents(internal_array, array);
- return array;
- }
- return TO_OBJECT(items);
-}
-
-
-// ES#sec-%typedarray%.from
-// %TypedArray%.from ( source [ , mapfn [ , thisArg ] ] )
-DEFINE_METHOD_LEN(
- GlobalTypedArray,
- 'from'(source, mapfn, thisArg) {
- if (!%IsConstructor(this)) throw %make_type_error(kNotConstructor, this);
- var mapping;
- if (!IS_UNDEFINED(mapfn)) {
- if (!IS_CALLABLE(mapfn)) throw %make_type_error(kCalledNonCallable, this);
- mapping = true;
- } else {
- mapping = false;
- }
- var arrayLike = IterableToArrayLike(source);
- var length = TO_LENGTH(arrayLike.length);
- var targetObject = TypedArrayCreate(this, length);
- var value, mappedValue;
- for (var i = 0; i < length; i++) {
- value = arrayLike[i];
- if (mapping) {
- mappedValue = %_Call(mapfn, thisArg, value, i);
- } else {
- mappedValue = value;
- }
- targetObject[i] = mappedValue;
- }
- return targetObject;
- },
- 1 /* Set function length. */
-);
-
// TODO(bmeurer): Migrate this to a proper builtin.
function TypedArrayConstructor() {
throw %make_type_error(kConstructAbstractClass, "TypedArray");
@@ -417,11 +137,4 @@ function TypedArrayConstructor() {
%AddNamedProperty(GlobalTypedArray.prototype, "toString", ArrayToString,
DONT_ENUM);
-
-macro SETUP_TYPED_ARRAY(NAME, ELEMENT_SIZE)
- %SetCode(GlobalNAME, NAMEConstructor);
-endmacro
-
-TYPED_ARRAYS(SETUP_TYPED_ARRAY)
-
})
diff --git a/deps/v8/src/json-parser.cc b/deps/v8/src/json-parser.cc
index 57e7fff8c5..2d9593091d 100644
--- a/deps/v8/src/json-parser.cc
+++ b/deps/v8/src/json-parser.cc
@@ -416,7 +416,7 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
if (!follow_expected) {
// If the expected transition failed, parse an internalized string and
// try to find a matching transition.
- key = ParseJsonInternalizedString();
+ key = ParseJsonString();
if (key.is_null()) return ReportUnexpectedCharacter();
target = TransitionsAccessor(map).FindTransitionToField(key);
@@ -491,7 +491,7 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
Handle<String> key;
Handle<Object> value;
- key = ParseJsonInternalizedString();
+ key = ParseJsonString();
if (key.is_null() || c0_ != ':') return ReportUnexpectedCharacter();
AdvanceSkipWhitespace();
@@ -812,7 +812,6 @@ Handle<String> JsonParser<seq_one_byte>::SlowScanJsonString(
}
template <bool seq_one_byte>
-template <bool is_internalized>
Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
DCHECK_EQ('"', c0_);
Advance();
@@ -821,7 +820,7 @@ Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
return factory()->empty_string();
}
- if (seq_one_byte && is_internalized) {
+ if (seq_one_byte) {
// Fast path for existing internalized strings. If the string being
// parsed is not a known internalized string, contains backslashes or
// unexpectedly reaches the end of string, return with an empty handle.
@@ -829,9 +828,13 @@ Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
// We intentionally use local variables instead of fields, compute hash
// while we are iterating a string and manually inline StringTable lookup
// here.
- uint32_t running_hash = isolate()->heap()->HashSeed();
+
int position = position_;
uc32 c0 = c0_;
+ uint32_t running_hash = isolate()->heap()->HashSeed();
+ uint32_t index = 0;
+ bool is_array_index = true;
+
do {
if (c0 == '\\') {
c0_ = c0;
@@ -845,6 +848,16 @@ Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
position_ = position;
return Handle<String>::null();
}
+ if (is_array_index) {
+ // With leading zero, the string has to be "0" to be a valid index.
+ if (!IsDecimalDigit(c0) || (position > position_ && index == 0)) {
+ is_array_index = false;
+ } else {
+ int d = c0 - '0';
+ is_array_index = index <= 429496729U - ((d + 3) >> 3);
+ index = (index * 10) + d;
+ }
+ }
running_hash = StringHasher::AddCharacterCore(running_hash,
static_cast<uint16_t>(c0));
position++;
@@ -856,9 +869,15 @@ Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
c0 = seq_source_->SeqOneByteStringGet(position);
} while (c0 != '"');
int length = position - position_;
- uint32_t hash = (length <= String::kMaxHashCalcLength)
- ? StringHasher::GetHashCore(running_hash)
- : static_cast<uint32_t>(length);
+ uint32_t hash;
+ if (is_array_index) {
+ hash =
+ StringHasher::MakeArrayIndexHash(index, length) >> String::kHashShift;
+ } else if (length <= String::kMaxHashCalcLength) {
+ hash = StringHasher::GetHashCore(running_hash);
+ } else {
+ hash = static_cast<uint32_t>(length);
+ }
Vector<const uint8_t> string_vector(seq_source_->GetChars() + position_,
length);
StringTable* string_table = isolate()->heap()->string_table();
@@ -877,12 +896,8 @@ Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
if (!element->IsTheHole(isolate()) &&
String::cast(element)->IsOneByteEqualTo(string_vector)) {
result = Handle<String>(String::cast(element), isolate());
-#ifdef DEBUG
- uint32_t hash_field =
- (hash << String::kHashShift) | String::kIsNotArrayIndexMask;
- DCHECK_EQ(static_cast<int>(result->Hash()),
- static_cast<int>(hash_field >> String::kHashShift));
-#endif
+ DCHECK_EQ(result->Hash(),
+ (hash << String::kHashShift) >> String::kHashShift);
break;
}
entry = StringTable::NextProbe(entry, count++, capacity);
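The new fast path above computes a possible array-index value while hashing the key. A minimal standalone sketch of that overflow guard (function and variable names here are illustrative, not V8's; the constant 429496729 keeps index * 10 + d within the maximum array index 2^32 - 2):

#include <cstdint>
#include <cstdio>

// Accumulate one decimal digit into |index|, mirroring the guard used while
// scanning a JSON key: reject leading zeros (except for "0" itself) and stop
// before index * 10 + d could exceed 4294967294 (2^32 - 2).
bool AccumulateArrayIndexDigit(uint32_t* index, char c, bool first_digit) {
  if (c < '0' || c > '9') return false;
  if (!first_digit && *index == 0) return false;  // leading zero
  uint32_t d = static_cast<uint32_t>(c - '0');
  // Threshold is 429496729 for d <= 4 and 429496728 for d >= 5,
  // which is exactly what ((d + 3) >> 3) subtracts.
  if (*index > 429496729u - ((d + 3) >> 3)) return false;
  *index = *index * 10 + d;
  return true;
}

int main() {
  const char* digits = "4294967294";  // 2^32 - 2, the largest valid index
  uint32_t index = 0;
  bool ok = true;
  for (const char* p = digits; *p != '\0'; ++p) {
    ok = ok && AccumulateArrayIndexDigit(&index, *p, p == digits);
  }
  std::printf("%u %s\n", index, ok ? "is a valid array index" : "is not");
  return 0;
}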
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index 6566c92e40..d76f642b38 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -75,19 +75,14 @@ class JsonParser BASE_EMBEDDED {
// literals. The string must only be double-quoted (not single-quoted), and
// the only allowed backslash-escapes are ", /, \, b, f, n, r, t and
// four-digit hex escapes (uXXXX). Any other use of backslashes is invalid.
- Handle<String> ParseJsonString() {
- return ScanJsonString<false>();
- }
-
bool ParseJsonString(Handle<String> expected);
- Handle<String> ParseJsonInternalizedString() {
- Handle<String> result = ScanJsonString<true>();
+ Handle<String> ParseJsonString() {
+ Handle<String> result = ScanJsonString();
if (result.is_null()) return result;
return factory()->InternalizeString(result);
}
- template <bool is_internalized>
Handle<String> ScanJsonString();
// Creates a new string and copies prefix[start..end] into the beginning
// of it. Then scans the rest of the string, adding characters after the
diff --git a/deps/v8/src/libplatform/tracing/trace-buffer.h b/deps/v8/src/libplatform/tracing/trace-buffer.h
index 16f3b2a12e..3c756b7a69 100644
--- a/deps/v8/src/libplatform/tracing/trace-buffer.h
+++ b/deps/v8/src/libplatform/tracing/trace-buffer.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef SRC_LIBPLATFORM_TRACING_TRACE_BUFFER_H_
-#define SRC_LIBPLATFORM_TRACING_TRACE_BUFFER_H_
+#ifndef V8_LIBPLATFORM_TRACING_TRACE_BUFFER_H_
+#define V8_LIBPLATFORM_TRACING_TRACE_BUFFER_H_
#include <memory>
#include <vector>
@@ -45,4 +45,4 @@ class TraceBufferRingBuffer : public TraceBuffer {
} // namespace platform
} // namespace v8
-#endif // SRC_LIBPLATFORM_TRACING_TRACE_BUFFER_H_
+#endif // V8_LIBPLATFORM_TRACING_TRACE_BUFFER_H_
diff --git a/deps/v8/src/libplatform/tracing/trace-writer.h b/deps/v8/src/libplatform/tracing/trace-writer.h
index 43d7cb6a90..7e1bdc24d6 100644
--- a/deps/v8/src/libplatform/tracing/trace-writer.h
+++ b/deps/v8/src/libplatform/tracing/trace-writer.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef SRC_LIBPLATFORM_TRACING_TRACE_WRITER_H_
-#define SRC_LIBPLATFORM_TRACING_TRACE_WRITER_H_
+#ifndef V8_LIBPLATFORM_TRACING_TRACE_WRITER_H_
+#define V8_LIBPLATFORM_TRACING_TRACE_WRITER_H_
#include "include/libplatform/v8-tracing.h"
@@ -30,4 +30,4 @@ class JSONTraceWriter : public TraceWriter {
} // namespace platform
} // namespace v8
-#endif // SRC_LIBPLATFORM_TRACING_TRACE_WRITER_H_
+#endif // V8_LIBPLATFORM_TRACING_TRACE_WRITER_H_
diff --git a/deps/v8/src/libsampler/sampler.cc b/deps/v8/src/libsampler/sampler.cc
index 75161fc7d6..492606475e 100644
--- a/deps/v8/src/libsampler/sampler.cc
+++ b/deps/v8/src/libsampler/sampler.cc
@@ -4,7 +4,7 @@
#include "src/libsampler/sampler.h"
-#if V8_OS_POSIX && !V8_OS_CYGWIN
+#if V8_OS_POSIX && !V8_OS_CYGWIN && !V8_OS_FUCHSIA
#define USE_SIGNALS
@@ -13,7 +13,7 @@
#include <signal.h>
#include <sys/time.h>
-#if !V8_OS_QNX && !V8_OS_FUCHSIA && !V8_OS_AIX
+#if !V8_OS_QNX && !V8_OS_AIX
#include <sys/syscall.h> // NOLINT
#endif
@@ -39,6 +39,28 @@
#include "src/base/win32-headers.h"
+#elif V8_OS_FUCHSIA
+
+#include <zircon/process.h>
+#include <zircon/syscalls.h>
+#include <zircon/syscalls/debug.h>
+#include <zircon/types.h>
+
+// TODO(wez): Remove this once the Fuchsia SDK has rolled.
+#if defined(ZX_THREAD_STATE_REGSET0)
+#define ZX_THREAD_STATE_GENERAL_REGS ZX_THREAD_STATE_REGSET0
+zx_status_t zx_thread_read_state(zx_handle_t h, uint32_t k, void* b, size_t l) {
+ uint32_t dummy_out_len = 0;
+ return zx_thread_read_state(h, k, b, static_cast<uint32_t>(l),
+ &dummy_out_len);
+}
+#if defined(__x86_64__)
+typedef zx_x86_64_general_regs_t zx_thread_state_general_regs_t;
+#else
+typedef zx_arm64_general_regs_t zx_thread_state_general_regs_t;
+#endif
+#endif // !defined(ZX_THREAD_STATE_GENERAL_REGS)
+
#endif
#include <algorithm>
@@ -336,6 +358,28 @@ class Sampler::PlatformData {
private:
HANDLE profiled_thread_;
};
+
+#elif V8_OS_FUCHSIA
+
+class Sampler::PlatformData {
+ public:
+ PlatformData() {
+ zx_handle_duplicate(zx_thread_self(), ZX_RIGHT_SAME_RIGHTS,
+ &profiled_thread_);
+ }
+ ~PlatformData() {
+ if (profiled_thread_ != ZX_HANDLE_INVALID) {
+ zx_handle_close(profiled_thread_);
+ profiled_thread_ = ZX_HANDLE_INVALID;
+ }
+ }
+
+ zx_handle_t profiled_thread() { return profiled_thread_; }
+
+ private:
+ zx_handle_t profiled_thread_ = ZX_HANDLE_INVALID;
+};
+
#endif // USE_SIGNALS
@@ -415,7 +459,7 @@ void SignalHandler::FillRegisterState(void* context, RegisterState* state) {
#if !(V8_OS_OPENBSD || (V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_S390)))
mcontext_t& mcontext = ucontext->uc_mcontext;
#endif
-#if V8_OS_LINUX || V8_OS_FUCHSIA
+#if V8_OS_LINUX
#if V8_HOST_ARCH_IA32
state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_EIP]);
state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_ESP]);
@@ -664,6 +708,53 @@ void Sampler::DoSample() {
ResumeThread(profiled_thread);
}
+#elif V8_OS_FUCHSIA
+
+void Sampler::DoSample() {
+ zx_handle_t profiled_thread = platform_data()->profiled_thread();
+ if (profiled_thread == ZX_HANDLE_INVALID) return;
+
+ if (zx_task_suspend(profiled_thread) != ZX_OK) return;
+
+ // Wait for the target thread to become suspended, or to exit.
+ // TODO(wez): There is currently no suspension count for threads, so there
+ // is a risk that some other caller resumes the thread in-between our suspend
+ // and wait calls, causing us to miss the SUSPENDED signal. We apply a 100ms
+ // deadline to protect against hanging the sampler thread in this case.
+ zx_signals_t signals = 0;
+ zx_status_t suspended = zx_object_wait_one(
+ profiled_thread, ZX_THREAD_SUSPENDED | ZX_THREAD_TERMINATED,
+ zx_deadline_after(ZX_MSEC(100)), &signals);
+ if (suspended != ZX_OK || (signals & ZX_THREAD_SUSPENDED) == 0) {
+ zx_task_resume(profiled_thread, 0);
+ return;
+ }
+
+ // Fetch a copy of its "general register" states.
+ zx_thread_state_general_regs_t thread_state = {};
+ if (zx_thread_read_state(profiled_thread, ZX_THREAD_STATE_GENERAL_REGS,
+ &thread_state, sizeof(thread_state)) == ZX_OK) {
+ v8::RegisterState state;
+#if V8_HOST_ARCH_X64
+ state.pc = reinterpret_cast<void*>(thread_state.rip);
+ state.sp = reinterpret_cast<void*>(thread_state.rsp);
+ state.fp = reinterpret_cast<void*>(thread_state.rbp);
+#elif V8_HOST_ARCH_ARM64
+ state.pc = reinterpret_cast<void*>(thread_state.pc);
+ state.sp = reinterpret_cast<void*>(thread_state.sp);
+ state.fp = reinterpret_cast<void*>(thread_state.r[29]);
+#endif
+ SampleStack(state);
+ }
+
+ zx_task_resume(profiled_thread, 0);
+}
+
+// TODO(wez): Remove this once the Fuchsia SDK has rolled.
+#if defined(ZX_THREAD_STATE_REGSET0)
+#undef ZX_THREAD_STATE_GENERAL_REGS
+#endif
+
#endif // USE_SIGNALS
} // namespace sampler
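Condensed, the Fuchsia sampling path added above is a suspend / wait-with-deadline / read-registers / resume sequence. A minimal sketch (error handling trimmed; it assumes the same pre-stable Zircon syscall surface the patch targets, e.g. zx_task_resume still taking a flags argument):

#include <zircon/syscalls.h>
#include <zircon/syscalls/debug.h>

// Take one register snapshot from |thread|. The deadline on the wait guards
// against another caller resuming the thread between suspend and wait, which
// would otherwise leave the sampler blocked waiting for ZX_THREAD_SUSPENDED.
bool SampleOnce(zx_handle_t thread, zx_thread_state_general_regs_t* regs) {
  if (zx_task_suspend(thread) != ZX_OK) return false;
  zx_signals_t observed = 0;
  zx_status_t status = zx_object_wait_one(
      thread, ZX_THREAD_SUSPENDED | ZX_THREAD_TERMINATED,
      zx_deadline_after(ZX_MSEC(100)), &observed);
  bool ok = status == ZX_OK && (observed & ZX_THREAD_SUSPENDED) != 0 &&
            zx_thread_read_state(thread, ZX_THREAD_STATE_GENERAL_REGS, regs,
                                 sizeof(*regs)) == ZX_OK;
  zx_task_resume(thread, 0);
  return ok;
}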
diff --git a/deps/v8/src/locked-queue-inl.h b/deps/v8/src/locked-queue-inl.h
index 31e8bd2fd8..65c8736d7a 100644
--- a/deps/v8/src/locked-queue-inl.h
+++ b/deps/v8/src/locked-queue-inl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LOCKED_QUEUE_INL_
-#define V8_LOCKED_QUEUE_INL_
+#ifndef V8_LOCKED_QUEUE_INL_H_
+#define V8_LOCKED_QUEUE_INL_H_
#include "src/base/atomic-utils.h"
#include "src/locked-queue.h"
@@ -88,4 +88,4 @@ inline bool LockedQueue<Record>::Peek(Record* record) const {
} // namespace internal
} // namespace v8
-#endif // V8_LOCKED_QUEUE_INL_
+#endif // V8_LOCKED_QUEUE_INL_H_
diff --git a/deps/v8/src/locked-queue.h b/deps/v8/src/locked-queue.h
index 5bb97c8a12..1667917329 100644
--- a/deps/v8/src/locked-queue.h
+++ b/deps/v8/src/locked-queue.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LOCKED_QUEUE_
-#define V8_LOCKED_QUEUE_
+#ifndef V8_LOCKED_QUEUE_H_
+#define V8_LOCKED_QUEUE_H_
#include "src/allocation.h"
#include "src/base/platform/platform.h"
@@ -40,4 +40,4 @@ class LockedQueue final BASE_EMBEDDED {
} // namespace internal
} // namespace v8
-#endif // V8_LOCKED_QUEUE_
+#endif // V8_LOCKED_QUEUE_H_
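The guard renames above (and in the tracing headers) converge on the V8_<PATH>_<FILE>_H_ naming scheme. For a hypothetical header at src/foo/bar-baz.h the convention looks like this:

// src/foo/bar-baz.h (hypothetical file, shown only to illustrate the scheme)
#ifndef V8_FOO_BAR_BAZ_H_
#define V8_FOO_BAR_BAZ_H_

// ... declarations ...

#endif  // V8_FOO_BAR_BAZ_H_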
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index f5d5be6848..90023e3731 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -16,6 +16,7 @@
#include "src/counters.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
+#include "src/instruction-stream.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
#include "src/libsampler/sampler.h"
@@ -30,6 +31,8 @@
#include "src/tracing/tracing-category-observer.h"
#include "src/unicode-inl.h"
#include "src/vm-state-inl.h"
+#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-objects.h"
#include "src/utils.h"
#include "src/version.h"
@@ -200,6 +203,24 @@ void CodeEventLogger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
LogRecordedBuffer(code, shared, name_buffer_->get(), name_buffer_->size());
}
+void CodeEventLogger::CodeCreateEvent(LogEventsAndTags tag,
+ wasm::WasmCode* code,
+ wasm::WasmName name) {
+ name_buffer_->Init(tag);
+ if (name.is_empty()) {
+ name_buffer_->AppendBytes("<wasm-unknown>");
+ } else {
+ name_buffer_->AppendBytes(name.start(), name.length());
+ }
+ name_buffer_->AppendByte('-');
+ if (code->IsAnonymous()) {
+ name_buffer_->AppendBytes("<anonymous>");
+ } else {
+ name_buffer_->AppendInt(code->index());
+ }
+ LogRecordedBuffer(code, name_buffer_->get(), name_buffer_->size());
+}
+
void CodeEventLogger::RegExpCodeCreateEvent(AbstractCode* code,
String* source) {
name_buffer_->Init(CodeEventListener::REG_EXP_TAG);
@@ -207,6 +228,13 @@ void CodeEventLogger::RegExpCodeCreateEvent(AbstractCode* code,
LogRecordedBuffer(code, nullptr, name_buffer_->get(), name_buffer_->size());
}
+void CodeEventLogger::InstructionStreamCreateEvent(
+ LogEventsAndTags tag, const InstructionStream* stream,
+ const char* description) {
+ name_buffer_->Init(tag);
+ name_buffer_->AppendBytes(description);
+ LogRecordedBuffer(stream, name_buffer_->get(), name_buffer_->size());
+}
// Linux perf tool logging support
class PerfBasicLogger : public CodeEventLogger {
@@ -221,6 +249,12 @@ class PerfBasicLogger : public CodeEventLogger {
private:
void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
const char* name, int length) override;
+ void LogRecordedBuffer(const InstructionStream* stream, const char* name,
+ int length) override;
+ void LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+ int length) override;
+ void WriteLogRecordedBuffer(uintptr_t address, int size, const char* name,
+ int name_length);
// Extension added to V8 log file name to get the low-level log name.
static const char kFilenameFormatString[];
@@ -254,6 +288,19 @@ PerfBasicLogger::~PerfBasicLogger() {
perf_output_handle_ = nullptr;
}
+void PerfBasicLogger::WriteLogRecordedBuffer(uintptr_t address, int size,
+ const char* name,
+ int name_length) {
+ // Linux perf expects hex literals without a leading 0x, while some
+ // implementations of printf might prepend one when using the %p format
+ // for pointers, leading to wrongly formatted JIT symbols maps.
+ //
+ // Instead, we use V8PRIxPTR format string and cast pointer to uintptr_t,
+ // so that we have control over the exact output format.
+ base::OS::FPrint(perf_output_handle_, "%" V8PRIxPTR " %x %.*s\n", address,
+ size, name_length, name);
+}
+
void PerfBasicLogger::LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo*,
const char* name, int length) {
if (FLAG_perf_basic_prof_only_functions &&
@@ -262,6 +309,19 @@ void PerfBasicLogger::LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo*,
return;
}
+ WriteLogRecordedBuffer(reinterpret_cast<uintptr_t>(code->instruction_start()),
+ code->instruction_size(), name, length);
+}
+
+void PerfBasicLogger::LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+ int length) {
+ WriteLogRecordedBuffer(
+ reinterpret_cast<uintptr_t>(code->instructions().start()),
+ code->instructions().length(), name, length);
+}
+
+void PerfBasicLogger::LogRecordedBuffer(const InstructionStream* stream,
+ const char* name, int length) {
// Linux perf expects hex literals without a leading 0x, while some
// implementations of printf might prepend one when using the %p format
// for pointers, leading to wrongly formatted JIT symbols maps.
@@ -269,8 +329,8 @@ void PerfBasicLogger::LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo*,
// Instead, we use V8PRIxPTR format string and cast pointer to uintptr_t,
// so that we have control over the exact output format.
base::OS::FPrint(perf_output_handle_, "%" V8PRIxPTR " %x %.*s\n",
- reinterpret_cast<uintptr_t>(code->instruction_start()),
- code->instruction_size(), length, name);
+ reinterpret_cast<uintptr_t>(stream->bytes()),
+ static_cast<int>(stream->byte_length()), length, name);
}
// Low-level logging support.
@@ -290,6 +350,10 @@ class LowLevelLogger : public CodeEventLogger {
private:
void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
const char* name, int length) override;
+ void LogRecordedBuffer(const InstructionStream* stream, const char* name,
+ int length) override;
+ void LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+ int length) override;
// Low-level profiling event structures.
struct CodeCreateStruct {
@@ -386,6 +450,30 @@ void LowLevelLogger::LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo*,
code->instruction_size());
}
+void LowLevelLogger::LogRecordedBuffer(const InstructionStream* stream,
+ const char* name, int length) {
+ CodeCreateStruct event;
+ event.name_size = length;
+ event.code_address = stream->bytes();
+ event.code_size = static_cast<int32_t>(stream->byte_length());
+ LogWriteStruct(event);
+ LogWriteBytes(name, length);
+ LogWriteBytes(reinterpret_cast<const char*>(stream->bytes()),
+ static_cast<int>(stream->byte_length()));
+}
+
+void LowLevelLogger::LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+ int length) {
+ CodeCreateStruct event;
+ event.name_size = length;
+ event.code_address = code->instructions().start();
+ event.code_size = code->instructions().length();
+ LogWriteStruct(event);
+ LogWriteBytes(name, length);
+ LogWriteBytes(reinterpret_cast<const char*>(code->instructions().start()),
+ code->instructions().length());
+}
+
void LowLevelLogger::CodeMoveEvent(AbstractCode* from, Address to) {
CodeMoveStruct event;
event.from_address = from->instruction_start();
@@ -425,6 +513,10 @@ class JitLogger : public CodeEventLogger {
private:
void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
const char* name, int length) override;
+ void LogRecordedBuffer(const InstructionStream* stream, const char* name,
+ int length) override;
+ void LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+ int length) override;
JitCodeEventHandler code_event_handler_;
base::Mutex logger_mutex_;
@@ -453,6 +545,32 @@ void JitLogger::LogRecordedBuffer(AbstractCode* code,
code_event_handler_(&event);
}
+void JitLogger::LogRecordedBuffer(const InstructionStream* stream,
+ const char* name, int length) {
+ JitCodeEvent event;
+ memset(&event, 0, sizeof(event));
+ event.type = JitCodeEvent::CODE_ADDED;
+ event.code_start = stream->bytes();
+ event.code_len = stream->byte_length();
+ Handle<SharedFunctionInfo> shared_function_handle;
+ event.script = ToApiHandle<v8::UnboundScript>(shared_function_handle);
+ event.name.str = name;
+ event.name.len = length;
+ code_event_handler_(&event);
+}
+
+void JitLogger::LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+ int length) {
+ JitCodeEvent event;
+ memset(&event, 0, sizeof(event));
+ event.type = JitCodeEvent::CODE_ADDED;
+ event.code_start = code->instructions().start();
+ event.code_len = code->instructions().length();
+ event.name.str = name;
+ event.name.len = length;
+ code_event_handler_(&event);
+}
+
void JitLogger::CodeMoveEvent(AbstractCode* from, Address to) {
base::LockGuard<base::Mutex> guard(&logger_mutex_);
@@ -979,12 +1097,32 @@ namespace {
void AppendCodeCreateHeader(Log::MessageBuilder& msg,
CodeEventListener::LogEventsAndTags tag,
+ AbstractCode::Kind kind, uint8_t* address, int size,
+ base::ElapsedTimer* timer) {
+ msg << kLogEventsNames[CodeEventListener::CODE_CREATION_EVENT]
+ << Logger::kNext << kLogEventsNames[tag] << Logger::kNext << kind
+ << Logger::kNext << timer->Elapsed().InMicroseconds() << Logger::kNext
+ << reinterpret_cast<void*>(address) << Logger::kNext << size
+ << Logger::kNext;
+}
+
+void AppendCodeCreateHeader(Log::MessageBuilder& msg,
+ CodeEventListener::LogEventsAndTags tag,
AbstractCode* code, base::ElapsedTimer* timer) {
+ AppendCodeCreateHeader(msg, tag, code->kind(), code->instruction_start(),
+ code->instruction_size(), timer);
+}
+
+void AppendCodeCreateHeader(Log::MessageBuilder& msg,
+ CodeEventListener::LogEventsAndTags tag,
+ const InstructionStream* stream,
+ base::ElapsedTimer* timer) {
+ // TODO(jgruber,v8:6666): In time, we'll need to support non-builtin streams.
msg << kLogEventsNames[CodeEventListener::CODE_CREATION_EVENT]
- << Logger::kNext << kLogEventsNames[tag] << Logger::kNext << code->kind()
+ << Logger::kNext << kLogEventsNames[tag] << Logger::kNext << Code::BUILTIN
<< Logger::kNext << timer->Elapsed().InMicroseconds() << Logger::kNext
- << reinterpret_cast<void*>(code->instruction_start()) << Logger::kNext
- << code->instruction_size() << Logger::kNext;
+ << reinterpret_cast<void*>(stream->bytes()) << Logger::kNext
+ << stream->byte_length() << Logger::kNext;
}
} // namespace
@@ -1026,6 +1164,21 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
msg.WriteToLogFile();
}
+void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ wasm::WasmCode* code, wasm::WasmName name) {
+ if (!is_logging_code_events()) return;
+ if (!FLAG_log_code || !log_->IsEnabled()) return;
+ Log::MessageBuilder msg(log_);
+ AppendCodeCreateHeader(msg, tag, AbstractCode::Kind::WASM_FUNCTION,
+ code->instructions().start(),
+ code->instructions().length(), &timer_);
+ if (name.is_empty()) {
+ msg << "<unknown wasm>";
+ } else {
+ msg << name.start();
+ }
+ msg.WriteToLogFile();
+}
// Although, it is possible to extract source and line from
// the SharedFunctionInfo object, we left it to caller
@@ -1174,6 +1327,17 @@ void Logger::RegExpCodeCreateEvent(AbstractCode* code, String* source) {
msg.WriteToLogFile();
}
+void Logger::InstructionStreamCreateEvent(LogEventsAndTags tag,
+ const InstructionStream* stream,
+ const char* description) {
+ if (!is_logging_code_events()) return;
+ if (!FLAG_log_code || !log_->IsEnabled()) return;
+ Log::MessageBuilder msg(log_);
+ AppendCodeCreateHeader(msg, tag, stream, &timer_);
+ msg << description;
+ msg.WriteToLogFile();
+}
+
void Logger::CodeMoveEvent(AbstractCode* from, Address to) {
if (!is_logging_code_events()) return;
MoveEventInternal(CodeEventListener::CODE_MOVE_EVENT, from->address(), to);
@@ -1489,6 +1653,24 @@ static int EnumerateCompiledFunctions(Heap* heap,
return compiled_funcs_count;
}
+static int EnumerateWasmModules(Heap* heap,
+ Handle<WasmCompiledModule>* modules) {
+ HeapIterator iterator(heap);
+ DisallowHeapAllocation no_gc;
+ int wasm_modules_count = 0;
+
+ for (HeapObject* obj = iterator.next(); obj != nullptr;
+ obj = iterator.next()) {
+ if (WasmCompiledModule::IsWasmCompiledModule(obj)) {
+ WasmCompiledModule* module = WasmCompiledModule::cast(obj);
+ if (modules != nullptr) {
+ modules[wasm_modules_count] = Handle<WasmCompiledModule>(module);
+ }
+ wasm_modules_count++;
+ }
+ }
+ return wasm_modules_count;
+}
void Logger::LogCodeObject(Object* object) {
AbstractCode* code_object = AbstractCode::cast(object);
@@ -1517,7 +1699,7 @@ void Logger::LogCodeObject(Object* object) {
break;
case AbstractCode::WASM_FUNCTION:
description = "A Wasm function";
- tag = CodeEventListener::STUB_TAG;
+ tag = CodeEventListener::FUNCTION_TAG;
break;
case AbstractCode::JS_TO_WASM_FUNCTION:
description = "A JavaScript to Wasm adapter";
@@ -1545,6 +1727,12 @@ void Logger::LogCodeObject(Object* object) {
PROFILE(isolate_, CodeCreateEvent(tag, code_object, description));
}
+void Logger::LogInstructionStream(Code* code, const InstructionStream* stream) {
+ DCHECK(Builtins::IsBuiltin(code));
+ const char* description = isolate_->builtins()->name(code->builtin_index());
+ CodeEventListener::LogEventsAndTags tag = CodeEventListener::BUILTIN_TAG;
+ PROFILE(isolate_, InstructionStreamCreateEvent(tag, stream, description));
+}
void Logger::LogCodeObjects() {
Heap* heap = isolate_->heap();
@@ -1637,13 +1825,12 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
}
}
-
void Logger::LogCompiledFunctions() {
Heap* heap = isolate_->heap();
HandleScope scope(isolate_);
const int compiled_funcs_count =
EnumerateCompiledFunctions(heap, nullptr, nullptr);
- ScopedVector< Handle<SharedFunctionInfo> > sfis(compiled_funcs_count);
+ ScopedVector<Handle<SharedFunctionInfo>> sfis(compiled_funcs_count);
ScopedVector<Handle<AbstractCode> > code_objects(compiled_funcs_count);
EnumerateCompiledFunctions(heap, sfis.start(), code_objects.start());
@@ -1654,8 +1841,14 @@ void Logger::LogCompiledFunctions() {
continue;
LogExistingFunction(sfis[i], code_objects[i]);
}
-}
+ const int compiled_wasm_modules_count = EnumerateWasmModules(heap, nullptr);
+ ScopedVector<Handle<WasmCompiledModule>> modules(compiled_wasm_modules_count);
+ EnumerateWasmModules(heap, modules.start());
+ for (int i = 0; i < compiled_wasm_modules_count; ++i) {
+ modules[i]->LogWasmCodes(isolate_);
+ }
+}
void Logger::LogAccessorCallbacks() {
Heap* heap = isolate_->heap();
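The perf-map entries emitted by WriteLogRecordedBuffer above use the plain "address size name" line format that Linux perf reads from /tmp/perf-<pid>.map. A standalone sketch of the same formatting decision (it does not use V8's Log/OS wrappers, and the symbol name is made up):

#include <cinttypes>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// One JIT symbol entry: "<start addr, hex, no 0x> <size, hex> <name>\n".
// Casting to uintptr_t and formatting with PRIxPTR avoids the "0x" prefix
// that some printf implementations add for %p.
void WritePerfMapEntry(std::FILE* out, const void* start, std::size_t size,
                       const char* name, int name_length) {
  std::fprintf(out, "%" PRIxPTR " %zx %.*s\n",
               reinterpret_cast<uintptr_t>(start), size, name_length, name);
}

int main() {
  static const unsigned char code[64] = {0};  // stand-in for a code region
  WritePerfMapEntry(stdout, code, sizeof(code), "LazyCompile:~demo", 17);
  return 0;
}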
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index 8305eb1001..b540c86173 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -74,6 +74,11 @@ class Profiler;
class ProfilerListener;
class RuntimeCallTimer;
class Ticker;
+class WasmCompiledModule;
+
+namespace wasm {
+class WasmCode;
+}
#undef LOG
#define LOG(isolate, Call) \
@@ -176,11 +181,16 @@ class Logger : public CodeEventListener {
void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
AbstractCode* code, SharedFunctionInfo* shared,
Name* source, int line, int column);
+ void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ wasm::WasmCode* code, wasm::WasmName name);
// Emits a code deoptimization event.
void CodeDisableOptEvent(AbstractCode* code, SharedFunctionInfo* shared);
void CodeMovingGCEvent();
// Emits a code create event for a RegExp.
void RegExpCodeCreateEvent(AbstractCode* code, String* source);
+ void InstructionStreamCreateEvent(LogEventsAndTags tag,
+ const InstructionStream* stream,
+ const char* description);
// Emits a code move event.
void CodeMoveEvent(AbstractCode* from, Address to);
// Emits a code line info record event.
@@ -234,6 +244,7 @@ class Logger : public CodeEventListener {
void LogExistingFunction(Handle<SharedFunctionInfo> shared,
Handle<AbstractCode> code);
+ void LogCompiledModule(Handle<WasmCompiledModule> module);
// Logs all compiled functions found in the heap.
void LogCompiledFunctions();
// Logs all accessor callbacks found in the heap.
@@ -257,6 +268,9 @@ class Logger : public CodeEventListener {
// Used for logging stubs found in the snapshot.
void LogCodeObject(Object* code_object);
+ // Used for logging off-heap instruction streams.
+ void LogInstructionStream(Code* code, const InstructionStream* stream);
+
private:
explicit Logger(Isolate* isolate);
~Logger();
@@ -379,8 +393,13 @@ class CodeEventLogger : public CodeEventListener {
void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
SharedFunctionInfo* shared, Name* source, int line,
int column) override;
- void RegExpCodeCreateEvent(AbstractCode* code, String* source) override;
+ void CodeCreateEvent(LogEventsAndTags tag, wasm::WasmCode* code,
+ wasm::WasmName name) override;
+ void RegExpCodeCreateEvent(AbstractCode* code, String* source) override;
+ void InstructionStreamCreateEvent(LogEventsAndTags tag,
+ const InstructionStream* stream,
+ const char* description) override;
void CallbackEvent(Name* name, Address entry_point) override {}
void GetterCallbackEvent(Name* name, Address entry_point) override {}
void SetterCallbackEvent(Name* name, Address entry_point) override {}
@@ -394,6 +413,10 @@ class CodeEventLogger : public CodeEventListener {
virtual void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
const char* name, int length) = 0;
+ virtual void LogRecordedBuffer(const InstructionStream* stream,
+ const char* name, int length) = 0;
+ virtual void LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+ int length) = 0;
NameBuffer* name_buffer_;
};
diff --git a/deps/v8/src/lookup.cc b/deps/v8/src/lookup.cc
index 71902dff84..58ad9318dd 100644
--- a/deps/v8/src/lookup.cc
+++ b/deps/v8/src/lookup.cc
@@ -211,8 +211,7 @@ Handle<JSReceiver> LookupIterator::GetRootForNonJSReceiver(
auto root =
handle(receiver->GetPrototypeChainRootMap(isolate)->prototype(), isolate);
if (root->IsNull(isolate)) {
- unsigned int magic = 0xBBBBBBBB;
- isolate->PushStackTraceAndDie(magic, *receiver, nullptr, magic);
+ isolate->PushStackTraceAndDie(*receiver);
}
return Handle<JSReceiver>::cast(root);
}
@@ -238,6 +237,7 @@ void LookupIterator::ReloadPropertyInformation() {
}
namespace {
+
bool IsTypedArrayFunctionInAnyContext(Isolate* isolate, JSReceiver* holder) {
static uint32_t context_slots[] = {
#define TYPED_ARRAY_CONTEXT_SLOTS(Type, type, TYPE, ctype, size) \
@@ -253,43 +253,49 @@ bool IsTypedArrayFunctionInAnyContext(Isolate* isolate, JSReceiver* holder) {
std::begin(context_slots), std::end(context_slots),
[=](uint32_t slot) { return isolate->IsInAnyContext(holder, slot); });
}
+
} // namespace
void LookupIterator::InternalUpdateProtector() {
if (isolate_->bootstrapper()->IsActive()) return;
if (*name_ == heap()->constructor_string()) {
- if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
+ if (!isolate_->IsSpeciesLookupChainIntact()) return;
// Setting the constructor property could change an instance's @@species
- if (holder_->IsJSArray() || holder_->IsJSTypedArray()) {
+ if (holder_->IsJSArray() || holder_->IsJSPromise() ||
+ holder_->IsJSTypedArray()) {
isolate_->CountUsage(
v8::Isolate::UseCounterFeature::kArrayInstanceConstructorModified);
- isolate_->InvalidateArraySpeciesProtector();
+ isolate_->InvalidateSpeciesProtector();
} else if (holder_->map()->is_prototype_map()) {
DisallowHeapAllocation no_gc;
- // Setting the constructor of Array.prototype or %TypedArray%.prototype of
- // any realm also needs to invalidate the species protector.
+ // Setting the constructor of Array.prototype, Promise.prototype or
+ // %TypedArray%.prototype of any realm also needs to invalidate the
+ // @@species protector.
// For typed arrays, we check a prototype of this holder since TypedArrays
// have different prototypes for each type, and their parent prototype is
// pointing the same TYPED_ARRAY_PROTOTYPE.
if (isolate_->IsInAnyContext(*holder_,
Context::INITIAL_ARRAY_PROTOTYPE_INDEX) ||
+ isolate_->IsInAnyContext(*holder_,
+ Context::PROMISE_PROTOTYPE_INDEX) ||
isolate_->IsInAnyContext(holder_->map()->prototype(),
Context::TYPED_ARRAY_PROTOTYPE_INDEX)) {
isolate_->CountUsage(v8::Isolate::UseCounterFeature::
kArrayPrototypeConstructorModified);
- isolate_->InvalidateArraySpeciesProtector();
+ isolate_->InvalidateSpeciesProtector();
}
}
} else if (*name_ == heap()->species_symbol()) {
- if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
- // Setting the Symbol.species property of any Array or TypedArray
- // constructor invalidates the species protector
+ if (!isolate_->IsSpeciesLookupChainIntact()) return;
+ // Setting the Symbol.species property of any Array, Promise or TypedArray
+ // constructor invalidates the @@species protector
if (isolate_->IsInAnyContext(*holder_, Context::ARRAY_FUNCTION_INDEX) ||
+ isolate_->IsInAnyContext(*holder_, Context::PROMISE_FUNCTION_INDEX) ||
IsTypedArrayFunctionInAnyContext(isolate_, *holder_)) {
isolate_->CountUsage(
v8::Isolate::UseCounterFeature::kArraySpeciesModified);
- isolate_->InvalidateArraySpeciesProtector();
+ isolate_->InvalidateSpeciesProtector();
}
} else if (*name_ == heap()->is_concat_spreadable_symbol()) {
if (!isolate_->IsIsConcatSpreadableLookupChainIntact()) return;
@@ -299,6 +305,14 @@ void LookupIterator::InternalUpdateProtector() {
if (holder_->IsJSArray()) {
isolate_->InvalidateArrayIteratorProtector();
}
+ } else if (*name_ == heap()->then_string()) {
+ if (!isolate_->IsPromiseThenLookupChainIntact()) return;
+ // Setting the "then" property on any JSPromise instance or on the
+ // initial %PromisePrototype% invalidates the Promise#then protector.
+ if (holder_->IsJSPromise() ||
+ isolate_->IsInAnyContext(*holder_, Context::PROMISE_PROTOTYPE_INDEX)) {
+ isolate_->InvalidatePromiseThenProtector();
+ }
}
}
@@ -306,35 +320,41 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
DCHECK(state_ == DATA || state_ == ACCESSOR);
DCHECK(HolderIsReceiverOrHiddenPrototype());
- Handle<JSObject> holder = GetHolder<JSObject>();
+ Handle<JSReceiver> holder = GetHolder<JSReceiver>();
+ // JSProxy does not have fast properties so we do an early return.
+ DCHECK_IMPLIES(holder->IsJSProxy(), !holder->HasFastProperties());
+ DCHECK_IMPLIES(holder->IsJSProxy(), name()->IsPrivate());
+ if (holder->IsJSProxy()) return;
+
+ Handle<JSObject> holder_obj = Handle<JSObject>::cast(holder);
if (IsElement()) {
- ElementsKind kind = holder->GetElementsKind();
+ ElementsKind kind = holder_obj->GetElementsKind();
ElementsKind to = value->OptimalElementsKind();
if (IsHoleyOrDictionaryElementsKind(kind)) to = GetHoleyElementsKind(to);
to = GetMoreGeneralElementsKind(kind, to);
if (kind != to) {
- JSObject::TransitionElementsKind(holder, to);
+ JSObject::TransitionElementsKind(holder_obj, to);
}
// Copy the backing store if it is copy-on-write.
if (IsSmiOrObjectElementsKind(to)) {
- JSObject::EnsureWritableFastElements(holder);
+ JSObject::EnsureWritableFastElements(holder_obj);
}
return;
}
- if (holder->IsJSGlobalObject()) {
+ if (holder_obj->IsJSGlobalObject()) {
Handle<GlobalDictionary> dictionary(
- JSGlobalObject::cast(*holder)->global_dictionary());
+ JSGlobalObject::cast(*holder_obj)->global_dictionary());
Handle<PropertyCell> cell(dictionary->CellAt(dictionary_entry()));
property_details_ = cell->property_details();
PropertyCell::PrepareForValue(dictionary, dictionary_entry(), value,
property_details_);
return;
}
- if (!holder->HasFastProperties()) return;
+ if (!holder_obj->HasFastProperties()) return;
PropertyConstness new_constness = kConst;
if (FLAG_track_constant_fields) {
@@ -348,7 +368,7 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
new_constness = kMutable;
}
- Handle<Map> old_map(holder->map(), isolate_);
+ Handle<Map> old_map(holder_obj->map(), isolate_);
Handle<Map> new_map = Map::PrepareForDataProperty(
old_map, descriptor_number(), new_constness, value);
@@ -361,7 +381,7 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
return;
}
- JSObject::MigrateToMap(holder, new_map);
+ JSObject::MigrateToMap(holder_obj, new_map);
ReloadPropertyInformation<false>();
}
@@ -370,38 +390,47 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
PropertyAttributes attributes) {
DCHECK(state_ == DATA || state_ == ACCESSOR);
DCHECK(HolderIsReceiverOrHiddenPrototype());
- Handle<JSObject> holder = GetHolder<JSObject>();
+
+ Handle<JSReceiver> holder = GetHolder<JSReceiver>();
+
+ // Property details can never change for private fields.
+ if (holder->IsJSProxy()) {
+ DCHECK(name()->IsPrivate());
+ return;
+ }
+
+ Handle<JSObject> holder_obj = Handle<JSObject>::cast(holder);
if (IsElement()) {
- DCHECK(!holder->HasFixedTypedArrayElements());
- DCHECK(attributes != NONE || !holder->HasFastElements());
- Handle<FixedArrayBase> elements(holder->elements());
- holder->GetElementsAccessor()->Reconfigure(holder, elements, number_, value,
- attributes);
+ DCHECK(!holder_obj->HasFixedTypedArrayElements());
+ DCHECK(attributes != NONE || !holder_obj->HasFastElements());
+ Handle<FixedArrayBase> elements(holder_obj->elements());
+ holder_obj->GetElementsAccessor()->Reconfigure(holder_obj, elements,
+ number_, value, attributes);
ReloadPropertyInformation<true>();
- } else if (holder->HasFastProperties()) {
- Handle<Map> old_map(holder->map(), isolate_);
+ } else if (holder_obj->HasFastProperties()) {
+ Handle<Map> old_map(holder_obj->map(), isolate_);
Handle<Map> new_map = Map::ReconfigureExistingProperty(
old_map, descriptor_number(), i::kData, attributes);
// Force mutable to avoid changing constant value by reconfiguring
// kData -> kAccessor -> kData.
new_map = Map::PrepareForDataProperty(new_map, descriptor_number(),
kMutable, value);
- JSObject::MigrateToMap(holder, new_map);
+ JSObject::MigrateToMap(holder_obj, new_map);
ReloadPropertyInformation<false>();
}
- if (!IsElement() && !holder->HasFastProperties()) {
+ if (!IsElement() && !holder_obj->HasFastProperties()) {
PropertyDetails details(kData, attributes, PropertyCellType::kMutable);
- if (holder->IsJSGlobalObject()) {
+ if (holder_obj->IsJSGlobalObject()) {
Handle<GlobalDictionary> dictionary(
- JSGlobalObject::cast(*holder)->global_dictionary());
+ JSGlobalObject::cast(*holder_obj)->global_dictionary());
Handle<PropertyCell> cell = PropertyCell::PrepareForValue(
dictionary, dictionary_entry(), value, details);
cell->set_value(*value);
property_details_ = cell->property_details();
} else {
- Handle<NameDictionary> dictionary(holder->property_dictionary());
+ Handle<NameDictionary> dictionary(holder_obj->property_dictionary());
PropertyDetails original_details =
dictionary->DetailsAt(dictionary_entry());
int enumeration_index = original_details.dictionary_index();
@@ -417,7 +446,7 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
#if VERIFY_HEAP
if (FLAG_verify_heap) {
- holder->JSObjectVerify();
+ holder->HeapObjectVerify();
}
#endif
}
@@ -427,9 +456,10 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
// Returns true if a new transition has been created, or false if an existing
// transition was followed.
bool LookupIterator::PrepareTransitionToDataProperty(
- Handle<JSObject> receiver, Handle<Object> value,
+ Handle<JSReceiver> receiver, Handle<Object> value,
PropertyAttributes attributes, Object::StoreFromKeyed store_mode) {
- DCHECK(receiver.is_identical_to(GetStoreTarget()));
+ DCHECK_IMPLIES(receiver->IsJSProxy(), name()->IsPrivate());
+ DCHECK(receiver.is_identical_to(GetStoreTarget<JSReceiver>()));
if (state_ == TRANSITION) return false;
if (!IsElement() && name()->IsPrivate()) {
@@ -497,10 +527,11 @@ bool LookupIterator::PrepareTransitionToDataProperty(
return created_new_map;
}
-void LookupIterator::ApplyTransitionToDataProperty(Handle<JSObject> receiver) {
+void LookupIterator::ApplyTransitionToDataProperty(
+ Handle<JSReceiver> receiver) {
DCHECK_EQ(TRANSITION, state_);
- DCHECK(receiver.is_identical_to(GetStoreTarget()));
+ DCHECK(receiver.is_identical_to(GetStoreTarget<JSReceiver>()));
holder_ = receiver;
if (receiver->IsJSGlobalObject()) {
JSObject::InvalidatePrototypeChains(receiver->map());
@@ -509,7 +540,10 @@ void LookupIterator::ApplyTransitionToDataProperty(Handle<JSObject> receiver) {
}
Handle<Map> transition = transition_map();
bool simple_transition = transition->GetBackPointer() == receiver->map();
- JSObject::MigrateToMap(receiver, transition);
+
+ if (!receiver->IsJSProxy()) {
+ JSObject::MigrateToMap(Handle<JSObject>::cast(receiver), transition);
+ }
if (simple_transition) {
int number = transition->LastAdded();
@@ -520,7 +554,7 @@ void LookupIterator::ApplyTransitionToDataProperty(Handle<JSObject> receiver) {
Handle<NameDictionary> dictionary(receiver->property_dictionary(),
isolate_);
int entry;
- if (receiver->map()->is_prototype_map()) {
+ if (receiver->map()->is_prototype_map() && receiver->IsJSObject()) {
JSObject::InvalidatePrototypeChains(receiver->map());
}
dictionary = NameDictionary::Add(dictionary, name(),
@@ -546,6 +580,7 @@ void LookupIterator::Delete() {
ElementsAccessor* accessor = object->GetElementsAccessor();
accessor->Delete(object, number_);
} else {
+ DCHECK(!name()->IsPrivateField());
bool is_prototype_map = holder->map()->is_prototype_map();
RuntimeCallTimerScope stats_scope(
isolate_, is_prototype_map
@@ -575,7 +610,7 @@ void LookupIterator::TransitionToAccessorProperty(
// Can only be called when the receiver is a JSObject. JSProxy has to be
// handled via a trap. Adding properties to primitive values is not
// observable.
- Handle<JSObject> receiver = GetStoreTarget();
+ Handle<JSObject> receiver = GetStoreTarget<JSObject>();
if (!IsElement() && name()->IsPrivate()) {
attributes = static_cast<PropertyAttributes>(attributes | DONT_ENUM);
}
@@ -639,7 +674,7 @@ void LookupIterator::TransitionToAccessorProperty(
void LookupIterator::TransitionToAccessorPair(Handle<Object> pair,
PropertyAttributes attributes) {
- Handle<JSObject> receiver = GetStoreTarget();
+ Handle<JSObject> receiver = GetStoreTarget<JSObject>();
holder_ = receiver;
PropertyDetails details(kAccessor, attributes, PropertyCellType::kMutable);
@@ -868,6 +903,7 @@ void LookupIterator::WriteDataValue(Handle<Object> value,
JSGlobalObject::cast(*holder)->global_dictionary();
dictionary->CellAt(dictionary_entry())->set_value(*value);
} else {
+ DCHECK_IMPLIES(holder->IsJSProxy(), name()->IsPrivate());
NameDictionary* dictionary = holder->property_dictionary();
dictionary->ValueAtPut(dictionary_entry(), *value);
}
@@ -883,7 +919,7 @@ bool LookupIterator::SkipInterceptor(JSObject* holder) {
switch (interceptor_state_) {
case InterceptorState::kUninitialized:
interceptor_state_ = InterceptorState::kSkipNonMasking;
- // Fall through.
+ V8_FALLTHROUGH;
case InterceptorState::kSkipNonMasking:
return true;
case InterceptorState::kProcessNonMasking:
@@ -934,13 +970,13 @@ LookupIterator::State LookupIterator::LookupInSpecialHolder(
if (map->is_access_check_needed()) {
if (is_element || !name_->IsPrivate()) return ACCESS_CHECK;
}
- // Fall through.
+ V8_FALLTHROUGH;
case ACCESS_CHECK:
if (check_interceptor() && HasInterceptor<is_element>(map) &&
!SkipInterceptor<is_element>(JSObject::cast(holder))) {
if (is_element || !name_->IsPrivate()) return INTERCEPTOR;
}
- // Fall through.
+ V8_FALLTHROUGH;
case INTERCEPTOR:
if (!is_element && map->IsJSGlobalObjectMap()) {
GlobalDictionary* dict =
@@ -996,6 +1032,7 @@ LookupIterator::State LookupIterator::LookupInRegularHolder(
number_ = static_cast<uint32_t>(number);
property_details_ = descriptors->GetDetails(number_);
} else {
+ DCHECK_IMPLIES(holder->IsJSProxy(), name()->IsPrivate());
NameDictionary* dict = holder->property_dictionary();
int number = dict->FindEntry(name_);
if (number == NameDictionary::kNotFound) return NotFound(holder);
diff --git a/deps/v8/src/lookup.h b/deps/v8/src/lookup.h
index 9ea2d77cf6..e107f534df 100644
--- a/deps/v8/src/lookup.h
+++ b/deps/v8/src/lookup.h
@@ -176,17 +176,17 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
Factory* factory() const { return isolate_->factory(); }
Handle<Object> GetReceiver() const { return receiver_; }
- Handle<JSObject> GetStoreTarget() const {
- DCHECK(receiver_->IsJSObject());
+ template <class T>
+ Handle<T> GetStoreTarget() const {
+ DCHECK(receiver_->IsJSReceiver());
if (receiver_->IsJSGlobalProxy()) {
Map* map = JSGlobalProxy::cast(*receiver_)->map();
if (map->has_hidden_prototype()) {
return handle(JSGlobalObject::cast(map->prototype()), isolate_);
}
}
- return Handle<JSObject>::cast(receiver_);
+ return Handle<T>::cast(receiver_);
}
-
bool is_dictionary_holder() const { return !holder_->HasFastProperties(); }
Handle<Map> transition_map() const {
DCHECK_EQ(TRANSITION, state_);
@@ -213,13 +213,13 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
bool HasAccess() const;
/* PROPERTY */
- bool ExtendingNonExtensible(Handle<JSObject> receiver) {
- DCHECK(receiver.is_identical_to(GetStoreTarget()));
+ bool ExtendingNonExtensible(Handle<JSReceiver> receiver) {
+ DCHECK(receiver.is_identical_to(GetStoreTarget<JSReceiver>()));
return !receiver->map()->is_extensible() &&
(IsElement() || !name_->IsPrivate());
}
void PrepareForDataProperty(Handle<Object> value);
- bool PrepareTransitionToDataProperty(Handle<JSObject> receiver,
+ bool PrepareTransitionToDataProperty(Handle<JSReceiver> receiver,
Handle<Object> value,
PropertyAttributes attributes,
Object::StoreFromKeyed store_mode);
@@ -227,10 +227,10 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
DCHECK_EQ(TRANSITION, state_);
return transition_->IsPropertyCell() ||
(transition_map()->is_dictionary_map() &&
- !GetStoreTarget()->HasFastProperties()) ||
+ !GetStoreTarget<JSReceiver>()->HasFastProperties()) ||
transition_map()->GetBackPointer()->IsMap();
}
- void ApplyTransitionToDataProperty(Handle<JSObject> receiver);
+ void ApplyTransitionToDataProperty(Handle<JSReceiver> receiver);
void ReconfigureDataProperty(Handle<Object> value,
PropertyAttributes attributes);
void Delete();
@@ -275,11 +275,12 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
inline void UpdateProtector() {
if (IsElement()) return;
// This list must be kept in sync with
- // CodeStubAssembler::HasAssociatedProtector!
+ // CodeStubAssembler::CheckForAssociatedProtector!
if (*name_ == heap()->is_concat_spreadable_symbol() ||
*name_ == heap()->constructor_string() ||
*name_ == heap()->species_symbol() ||
- *name_ == heap()->iterator_symbol()) {
+ *name_ == heap()->iterator_symbol() ||
+ *name_ == heap()->then_string()) {
InternalUpdateProtector();
}
}
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index 923535517a..1a1a5b29ff 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -259,6 +259,7 @@ class ErrorUtils : public AllStatic {
T(Debugger, "Debugger: %") \
T(DebuggerLoading, "Error loading debugger") \
T(DefaultOptionsMissing, "Internal % error. Default options are missing.") \
+ T(DeletePrivateField, "Private fields can not be deleted") \
T(UncaughtException, "Uncaught %") \
T(Unsupported, "Not supported") \
T(WrongServiceType, "Internal error, wrong service type: %") \
@@ -340,6 +341,7 @@ class ErrorUtils : public AllStatic {
T(InvalidRegExpExecResult, \
"RegExp exec method returned something other than an Object or null") \
T(IteratorResultNotAnObject, "Iterator result % is not an object") \
+ T(IteratorSymbolNonCallable, "Found non-callable @@iterator") \
T(IteratorValueNotAnObject, "Iterator value % is not an entry object") \
T(LanguageID, "Language ID should be string or object.") \
T(MethodCalledOnWrongObject, \
@@ -598,6 +600,7 @@ class ErrorUtils : public AllStatic {
T(IllegalLanguageModeDirective, \
"Illegal '%' directive in function with non-simple parameter list") \
T(IllegalReturn, "Illegal return statement") \
+ T(IntrinsicWithSpread, "Intrinsic calls do not support spread arguments") \
T(InvalidRestBindingPattern, \
"`...` must be followed by an identifier in declaration contexts") \
T(InvalidRestAssignmentPattern, \
@@ -615,6 +618,7 @@ class ErrorUtils : public AllStatic {
"Invalid left-hand side expression in prefix operation") \
T(InvalidRegExpFlags, "Invalid flags supplied to RegExp constructor '%'") \
T(InvalidOrUnexpectedToken, "Invalid or unexpected token") \
+ T(InvalidPrivateFieldAccess, "Invalid private field '%'") \
T(JsonParseUnexpectedEOS, "Unexpected end of JSON input") \
T(JsonParseUnexpectedToken, "Unexpected token % in JSON at position %") \
T(JsonParseUnexpectedTokenNumber, "Unexpected number in JSON at position %") \
@@ -685,8 +689,6 @@ class ErrorUtils : public AllStatic {
T(TypedArrayTooShort, \
"Derived TypedArray constructor created an array which was too small") \
T(UnexpectedEOS, "Unexpected end of input") \
- T(UnexpectedFunctionSent, \
- "function.sent expression is not allowed outside a generator") \
T(UnexpectedReserved, "Unexpected reserved word") \
T(UnexpectedStrictReserved, "Unexpected strict mode reserved word") \
T(UnexpectedSuper, "'super' keyword unexpected here") \
@@ -722,11 +724,10 @@ class ErrorUtils : public AllStatic {
T(WasmTrapDivByZero, "divide by zero") \
T(WasmTrapDivUnrepresentable, "divide result unrepresentable") \
T(WasmTrapRemByZero, "remainder by zero") \
- T(WasmTrapFloatUnrepresentable, "integer result unrepresentable") \
- T(WasmTrapFuncInvalid, "invalid function") \
+ T(WasmTrapFloatUnrepresentable, "float unrepresentable in integer range") \
+ T(WasmTrapFuncInvalid, "invalid index into function table") \
T(WasmTrapFuncSigMismatch, "function signature mismatch") \
- T(WasmTrapInvalidIndex, "invalid index into function table") \
- T(WasmTrapTypeError, "invalid type") \
+ T(WasmTrapTypeError, "wasm function signature contains illegal type") \
T(WasmExceptionError, "wasm exception") \
/* Asm.js validation related */ \
T(AsmJsInvalid, "Invalid asm.js: %") \
diff --git a/deps/v8/src/mips/OWNERS b/deps/v8/src/mips/OWNERS
index 978563cab5..4ce9d7f91d 100644
--- a/deps/v8/src/mips/OWNERS
+++ b/deps/v8/src/mips/OWNERS
@@ -1,2 +1,3 @@
ivica.bogosavljevic@mips.com
Miran.Karic@mips.com
+sreten.kovacevic@mips.com
\ No newline at end of file
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index 803c16b829..a5e2335852 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -46,8 +46,7 @@
namespace v8 {
namespace internal {
-
-bool CpuFeatures::SupportsCrankshaft() { return IsSupported(FPU); }
+bool CpuFeatures::SupportsOptimizer() { return IsSupported(FPU); }
bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(MIPS_SIMD); }
@@ -126,16 +125,14 @@ Address Assembler::target_address_from_return_address(Address pc) {
}
void Assembler::deserialization_set_special_target_at(
- Isolate* isolate, Address instruction_payload, Code* code, Address target) {
+ Address instruction_payload, Code* code, Address target) {
if (IsMipsArchVariant(kMips32r6)) {
// On R6 the address location is shifted by one instruction
set_target_address_at(
- isolate,
instruction_payload - (kInstructionsFor32BitConstant - 1) * kInstrSize,
code ? code->constant_pool() : nullptr, target);
} else {
set_target_address_at(
- isolate,
instruction_payload - kInstructionsFor32BitConstant * kInstrSize,
code ? code->constant_pool() : nullptr, target);
}
@@ -172,9 +169,8 @@ void Assembler::set_target_internal_reference_encoded_at(Address pc,
// after complete deserialization, no need to flush on each reference.
}
-
void Assembler::deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
+ Address pc, Address target, RelocInfo::Mode mode) {
if (mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
DCHECK(IsLui(instr_at(pc)));
set_target_internal_reference_encoded_at(pc, target);
@@ -200,7 +196,7 @@ void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(target->GetIsolate(), pc_, constant_pool_,
+ Assembler::set_target_address_at(pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
@@ -250,15 +246,15 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
return target_address();
}
-void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
+void RelocInfo::set_target_runtime_entry(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target)
- set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
}
-void RelocInfo::WipeOut(Isolate* isolate) {
+void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
@@ -267,12 +263,12 @@ void RelocInfo::WipeOut(Isolate* isolate) {
} else if (IsInternalReferenceEncoded(rmode_)) {
Assembler::set_target_internal_reference_encoded_at(pc_, nullptr);
} else {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr);
+ Assembler::set_target_address_at(pc_, constant_pool_, nullptr);
}
}
template <typename ObjectVisitor>
-void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index bd540346c0..a39c06eaa2 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -79,6 +79,9 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
#if defined(_MIPS_ARCH_MIPS32R6)
// FP64 mode is implied on r6.
supported_ |= 1u << FP64FPU;
+#if defined(_MIPS_MSA)
+ supported_ |= 1u << MIPS_SIMD;
+#endif
#endif
#if defined(FPU_MODE_FP64)
supported_ |= 1u << FP64FPU;
@@ -91,8 +94,14 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
if (cpu.is_fp64_mode()) supported_ |= 1u << FP64FPU;
#elif defined(FPU_MODE_FP64)
supported_ |= 1u << FP64FPU;
+#if defined(_MIPS_ARCH_MIPS32R6)
+#if defined(_MIPS_MSA)
+ supported_ |= 1u << MIPS_SIMD;
+#else
if (cpu.has_msa()) supported_ |= 1u << MIPS_SIMD;
#endif
+#endif
+#endif
#if defined(_MIPS_ARCH_MIPS32RX)
if (cpu.architecture() == 6) {
supported_ |= 1u << MIPSr6;
@@ -200,22 +209,20 @@ uint32_t RelocInfo::embedded_size() const {
Assembler::target_address_at(pc_, constant_pool_));
}
-void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
+void RelocInfo::set_embedded_address(Address address,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
- flush_mode);
+ Assembler::set_target_address_at(pc_, constant_pool_, address, flush_mode);
}
-void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
- ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_,
+void RelocInfo::set_embedded_size(uint32_t size, ICacheFlushMode flush_mode) {
+ Assembler::set_target_address_at(pc_, constant_pool_,
reinterpret_cast<Address>(size), flush_mode);
}
-void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
+void RelocInfo::set_js_to_wasm_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- set_embedded_address(isolate, address, icache_flush_mode);
+ set_embedded_address(address, icache_flush_mode);
}
Address RelocInfo::js_to_wasm_address() const {
@@ -272,8 +279,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
break;
}
Address pc = buffer_ + request.offset();
- set_target_value_at(isolate, pc,
- reinterpret_cast<uint32_t>(object.location()));
+ set_target_value_at(pc, reinterpret_cast<uint32_t>(object.location()));
}
}
@@ -2492,15 +2498,6 @@ void Assembler::cfc1(Register rt, FPUControlRegister fs) {
}
-void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
- uint64_t i;
- memcpy(&i, &d, 8);
-
- *lo = i & 0xFFFFFFFF;
- *hi = i >> 32;
-}
-
-
void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
DCHECK(!IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
@@ -3889,11 +3886,8 @@ void Assembler::QuietNaN(HeapObject* object) {
// There is an optimization below, which emits a nop when the address
// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
// and possibly removed.
-void Assembler::set_target_value_at(Isolate* isolate, Address pc,
- uint32_t target,
+void Assembler::set_target_value_at(Address pc, uint32_t target,
ICacheFlushMode icache_flush_mode) {
- DCHECK_IMPLIES(isolate == nullptr, icache_flush_mode == SKIP_ICACHE_FLUSH);
-
Instr instr2 = instr_at(pc + kInstrSize);
uint32_t rt_code = GetRtField(instr2);
uint32_t* p = reinterpret_cast<uint32_t*>(pc);
@@ -3924,7 +3918,7 @@ void Assembler::set_target_value_at(Isolate* isolate, Address pc,
}
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, pc, 2 * sizeof(int32_t));
+ Assembler::FlushICache(pc, 2 * sizeof(int32_t));
}
}
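For reference, set_target_value_at on MIPS32 patches the two-instruction lui/ori pair that materializes a 32-bit constant (the same pair the serializer comment below refers to). A minimal sketch of how the value maps onto those two 16-bit immediates; illustration only, not part of the patch, and the instruction encoding details are assumed:

// Illustration only, not V8 code.
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t target = 0x12345678;
  uint32_t lui_imm = target >> 16;     // upper half, written into the lui immediate
  uint32_t ori_imm = target & 0xFFFF;  // lower half, written into the ori immediate
  uint32_t rebuilt = (lui_imm << 16) | ori_imm;
  printf("%08x\n", (unsigned)rebuilt);  // prints 12345678
  return 0;
}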
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 4c68e730b3..a5d608898f 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -388,7 +388,7 @@ class Operand BASE_EMBEDDED {
public:
// Immediate.
INLINE(explicit Operand(int32_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE32))
+ RelocInfo::Mode rmode = RelocInfo::NONE))
: rm_(no_reg), rmode_(rmode) {
value_.immediate = immediate;
}
@@ -400,8 +400,7 @@ class Operand BASE_EMBEDDED {
INLINE(explicit Operand(Object** opp));
INLINE(explicit Operand(Context** cpp));
explicit Operand(Handle<HeapObject> handle);
- INLINE(explicit Operand(Smi* value))
- : rm_(no_reg), rmode_(RelocInfo::NONE32) {
+ INLINE(explicit Operand(Smi* value)) : rm_(no_reg), rmode_(RelocInfo::NONE) {
value_.immediate = reinterpret_cast<intptr_t>(value);
}
@@ -568,9 +567,9 @@ class Assembler : public AssemblerBase {
// The isolate argument is unused (and may be nullptr) when skipping flushing.
static Address target_address_at(Address pc);
INLINE(static void set_target_address_at)
- (Isolate* isolate, Address pc, Address target,
+ (Address pc, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
- set_target_value_at(isolate, pc, reinterpret_cast<uint32_t>(target),
+ set_target_value_at(pc, reinterpret_cast<uint32_t>(target),
icache_flush_mode);
}
// On MIPS there is no Constant Pool so we skip that parameter.
@@ -578,13 +577,13 @@ class Assembler : public AssemblerBase {
return target_address_at(pc);
}
INLINE(static void set_target_address_at(
- Isolate* isolate, Address pc, Address constant_pool, Address target,
+ Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
- set_target_address_at(isolate, pc, target, icache_flush_mode);
+ set_target_address_at(pc, target, icache_flush_mode);
}
static void set_target_value_at(
- Isolate* isolate, Address pc, uint32_t target,
+ Address pc, uint32_t target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address
@@ -597,12 +596,11 @@ class Assembler : public AssemblerBase {
// This is for calls and branches within generated code. The serializer
// has already deserialized the lui/ori instructions etc.
inline static void deserialization_set_special_target_at(
- Isolate* isolate, Address instruction_payload, Code* code,
- Address target);
+ Address instruction_payload, Code* code, Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target,
+ Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// Size of an instruction.
@@ -1893,8 +1891,6 @@ class Assembler : public AssemblerBase {
return internal_trampoline_exception_;
}
- void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi);
-
bool is_trampoline_emitted() const {
return trampoline_emitted_;
}
@@ -2235,4 +2231,4 @@ class UseScratchRegisterScope {
} // namespace internal
} // namespace v8
-#endif // V8_ARM_ASSEMBLER_MIPS_H_
+#endif // V8_MIPS_ASSEMBLER_MIPS_H_
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index c07422ff5f..b2e52745ed 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -541,9 +541,9 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
}
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
+ DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICache(isolate, buffer, allocated);
+ Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
@@ -568,9 +568,9 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
+ DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICache(isolate, buffer, allocated);
+ Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/mips/constants-mips.h
index 565fcd9a68..496c715e81 100644
--- a/deps/v8/src/mips/constants-mips.h
+++ b/deps/v8/src/mips/constants-mips.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS_CONSTANTS_H_
-#define V8_MIPS_CONSTANTS_H_
+#ifndef V8_MIPS_CONSTANTS_MIPS_H_
+#define V8_MIPS_CONSTANTS_MIPS_H_
#include "src/globals.h"
// UNIMPLEMENTED_ macro for MIPS.
#ifdef DEBUG
@@ -1931,4 +1931,4 @@ bool InstructionGetters<T>::IsForbiddenAfterBranchInstr(Instr instr) {
} // namespace internal
} // namespace v8
-#endif // #ifndef V8_MIPS_CONSTANTS_H_
+#endif // V8_MIPS_CONSTANTS_MIPS_H_
diff --git a/deps/v8/src/mips/cpu-mips.cc b/deps/v8/src/mips/cpu-mips.cc
index 1199365b7d..2e71817bd8 100644
--- a/deps/v8/src/mips/cpu-mips.cc
+++ b/deps/v8/src/mips/cpu-mips.cc
@@ -38,9 +38,7 @@ void CpuFeatures::FlushICache(void* start, size_t size) {
int res;
// See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
res = syscall(__NR_cacheflush, start, size, ICACHE);
- if (res) {
- V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");
- }
+ if (res) FATAL("Failed to flush the instruction cache");
#endif // ANDROID
#endif // !USE_SIMULATOR.
}
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index f27bdc9b68..d2f8ebb0ee 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -917,6 +917,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
PrintMsaImmElm(instr);
return 4;
}
+ UNREACHABLE();
}
case 'r': { // 'r: registers.
return FormatRegister(instr, format);
diff --git a/deps/v8/src/mips/frame-constants-mips.h b/deps/v8/src/mips/frame-constants-mips.h
index 344453794a..6d7e471b09 100644
--- a/deps/v8/src/mips/frame-constants-mips.h
+++ b/deps/v8/src/mips/frame-constants-mips.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS_FRAMES_MIPS_H_
-#define V8_MIPS_FRAMES_MIPS_H_
+#ifndef V8_MIPS_FRAME_CONSTANTS_MIPS_H_
+#define V8_MIPS_FRAME_CONSTANTS_MIPS_H_
namespace v8 {
namespace internal {
@@ -49,4 +49,4 @@ class JavaScriptFrameConstants : public AllStatic {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_MIPS_FRAME_CONSTANTS_MIPS_H_
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index 795fdc4af8..a23f8f0fd4 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -68,12 +68,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
-void FastNewClosureDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1, a2, a3};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 5c89467cd8..84cf23c832 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -14,6 +14,7 @@
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
+#include "src/instruction-stream.h"
#include "src/mips/assembler-mips-inl.h"
#include "src/mips/macro-assembler-mips.h"
#include "src/register-configuration.h"
@@ -1162,6 +1163,7 @@ void TurboAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
void TurboAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
// Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
// load to two 32-bit loads.
+ BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4);
MemOperand tmp = src;
AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES);
@@ -1185,6 +1187,7 @@ void TurboAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
void TurboAssembler::Sdc1(FPURegister fd, const MemOperand& src) {
// Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
// store to two 32-bit stores.
+ BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4);
MemOperand tmp = src;
AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES);
@@ -2170,23 +2173,22 @@ void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) {
}
}
-void TurboAssembler::Move(FPURegister dst, float imm) {
+void TurboAssembler::Move(FPURegister dst, uint32_t src) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- li(scratch, Operand(bit_cast<int32_t>(imm)));
+ li(scratch, Operand(static_cast<int32_t>(src)));
mtc1(scratch, dst);
}
-void TurboAssembler::Move(FPURegister dst, double imm) {
- int64_t imm_bits = bit_cast<int64_t>(imm);
+void TurboAssembler::Move(FPURegister dst, uint64_t src) {
// Handle special values first.
- if (imm_bits == bit_cast<int64_t>(0.0) && has_double_zero_reg_set_) {
+ if (src == bit_cast<uint64_t>(0.0) && has_double_zero_reg_set_) {
mov_d(dst, kDoubleRegZero);
- } else if (imm_bits == bit_cast<int64_t>(-0.0) && has_double_zero_reg_set_) {
+ } else if (src == bit_cast<uint64_t>(-0.0) && has_double_zero_reg_set_) {
Neg_d(dst, kDoubleRegZero);
} else {
- uint32_t lo, hi;
- DoubleAsTwoUInt32(imm, &lo, &hi);
+ uint32_t lo = src & 0xFFFFFFFF;
+ uint32_t hi = src >> 32;
// Move the low part of the double into the lower of the corresponding FPU
// register of FPU register pair.
if (lo != 0) {
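The new Move(FPURegister, uint64_t) overload works directly on the raw bit pattern of the double and splits it into the two 32-bit halves that go to the low (mtc1) and high (mthc1) words of the FPU register pair. A minimal sketch of that split, assuming an IEEE-754 double; illustration only, not part of the patch:

// Illustration only, not V8 code.
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  double d = 1.5;
  uint64_t src;
  memcpy(&src, &d, sizeof(src));   // plays the role of bit_cast<uint64_t>(imm)
  uint32_t lo = src & 0xFFFFFFFF;  // low word, mtc1
  uint32_t hi = src >> 32;         // high word, mthc1
  printf("lo=%08x hi=%08x\n", (unsigned)lo, (unsigned)hi);  // lo=00000000 hi=3ff80000
  return 0;
}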
@@ -2308,6 +2310,79 @@ void TurboAssembler::Clz(Register rd, Register rs) {
}
}
+void TurboAssembler::Ctz(Register rd, Register rs) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ // We don't have an instruction to count the number of trailing zeroes.
+ // Start by flipping the bits end-for-end so we can count the number of
+ // leading zeroes instead.
+ Ror(rd, rs, 16);
+ wsbh(rd, rd);
+ bitswap(rd, rd);
+ Clz(rd, rd);
+ } else {
+ // Convert trailing zeroes to trailing ones, and bits to their left
+ // to zeroes.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Addu(scratch, rs, -1);
+ Xor(rd, scratch, rs);
+ And(rd, rd, scratch);
+ // Count number of leading zeroes.
+ Clz(rd, rd);
+ // Subtract number of leading zeroes from 32 to get number of trailing
+ // ones. Remember that the trailing ones were formerly trailing zeroes.
+ li(scratch, 32);
+ Subu(rd, scratch, rd);
+ }
+}
+
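The pre-r6 path above relies on the classic trick that (rs - 1) ^ rs, masked with (rs - 1), leaves ones exactly where rs had trailing zeroes, so 32 minus the leading-zero count of that mask is the trailing-zero count. A minimal C++ restatement of the emitted sequence; illustration only, not part of the patch:

// Illustration only, not V8 code.
#include <cstdint>
#include <cstdio>

static uint32_t Clz32(uint32_t x) {
  if (x == 0) return 32;
  uint32_t n = 0;
  while (!(x & 0x80000000u)) { x <<= 1; ++n; }
  return n;
}

int main() {
  uint32_t rs = 0x50;                      // binary ...0101'0000, ctz should be 4
  uint32_t scratch = rs - 1;               // Addu(scratch, rs, -1)
  uint32_t rd = (scratch ^ rs) & scratch;  // Xor + And
  rd = 32 - Clz32(rd);                     // Clz; li 32; Subu
  printf("ctz=%u\n", (unsigned)rd);        // prints ctz=4
  return 0;
}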
+void TurboAssembler::Popcnt(Register rd, Register rs) {
+ // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
+ //
+ // A generalization of the best bit counting method to integers of
+ // bit-widths up to 128 (parameterized by type T) is this:
+ //
+ // v = v - ((v >> 1) & (T)~(T)0/3); // temp
+ // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3); // temp
+ // v = (v + (v >> 4)) & (T)~(T)0/255*15; // temp
+ // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; //count
+ //
+ // For comparison, for 32-bit quantities, this algorithm can be executed
+ // using 20 MIPS instructions (the calls to LoadConst32() generate two
+ // machine instructions each for the values being used in this algorithm).
+ // A(n unrolled) loop-based algorithm requires 25 instructions.
+ //
+ // For 64-bit quantities, this algorithm gets executed twice, (once
+ // for in_lo, and again for in_hi), but saves a few instructions
+ // because the mask values only have to be loaded once. Using this
+ // algorithm the count for a 64-bit operand can be performed in 29
+ // instructions compared to a loop-based algorithm which requires 47
+ // instructions.
+ uint32_t B0 = 0x55555555; // (T)~(T)0/3
+ uint32_t B1 = 0x33333333; // (T)~(T)0/15*3
+ uint32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15
+ uint32_t value = 0x01010101; // (T)~(T)0/255
+ uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = t8;
+ srl(scratch, rs, 1);
+ li(scratch2, B0);
+ And(scratch, scratch, scratch2);
+ Subu(scratch, rs, scratch);
+ li(scratch2, B1);
+ And(rd, scratch, scratch2);
+ srl(scratch, scratch, 2);
+ And(scratch, scratch, scratch2);
+ Addu(scratch, rd, scratch);
+ srl(rd, scratch, 4);
+ Addu(rd, rd, scratch);
+ li(scratch2, B2);
+ And(rd, rd, scratch2);
+ li(scratch, value);
+ Mul(rd, rd, scratch);
+ srl(rd, rd, shift);
+}
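The SWAR popcount emitted above can be checked in plain C++ with the same masks (B0, B1, B2), multiplier and shift; a minimal sketch, illustration only and not part of the patch:

// Illustration only, not V8 code.
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t v = 0xF0F00003;                           // 10 bits set
  v = v - ((v >> 1) & 0x55555555u);                  // B0 step
  v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);  // B1 step
  v = (v + (v >> 4)) & 0x0F0F0F0Fu;                  // B2 step
  uint32_t count = (v * 0x01010101u) >> 24;          // value, shift
  printf("popcount=%u\n", (unsigned)count);          // prints popcount=10
  return 0;
}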
void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
Register result,
@@ -3991,12 +4066,27 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
- Label skip_hook;
+ Label skip_hook, call_hook;
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(isolate());
+ li(t0, Operand(debug_is_active));
+ lb(t0, MemOperand(t0));
+ Branch(&skip_hook, eq, t0, Operand(zero_reg));
+
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
li(t0, Operand(debug_hook_active));
lb(t0, MemOperand(t0));
+ Branch(&call_hook, ne, t0, Operand(zero_reg));
+
+ lw(t0, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
+ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kDebugInfoOffset));
+ JumpIfSmi(t0, &skip_hook);
+ lw(t0, FieldMemOperand(t0, DebugInfo::kFlagsOffset));
+ And(t0, t0, Operand(Smi::FromInt(DebugInfo::kBreakAtEntry)));
Branch(&skip_hook, eq, t0, Operand(zero_reg));
+
+ bind(&call_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
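Read as straight-line logic, the rewritten hook check branches as follows; a sketch of the control flow with descriptive placeholder names for the values loaded into t0, illustration only and not V8 code:

// Illustration only, not V8 code.
#include <cstdio>

static bool ShouldCallDebugHook(bool debug_is_active, bool debug_hook_active,
                                bool has_debug_info, bool break_at_entry) {
  if (!debug_is_active) return false;  // first Branch to skip_hook
  if (debug_hook_active) return true;  // Branch to call_hook
  if (!has_debug_info) return false;   // JumpIfSmi on the DebugInfo slot
  return break_at_entry;               // DebugInfo::kBreakAtEntry flag test
}

int main() {
  printf("%d\n", ShouldCallDebugHook(true, false, true, true));    // 1
  printf("%d\n", ShouldCallDebugHook(false, true, false, false));  // 0
  return 0;
}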
@@ -4054,13 +4144,15 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
- Register code = t0;
+ Register code = kJavaScriptCallCodeStartRegister;
lw(code, FieldMemOperand(function, JSFunction::kCodeOffset));
if (flag == CALL_FUNCTION) {
- Call(code, Code::kHeaderSize - kHeapObjectTag);
+ Addu(code, code, Code::kHeaderSize - kHeapObjectTag);
+ Call(code);
} else {
DCHECK(flag == JUMP_FUNCTION);
- Jump(code, Code::kHeaderSize - kHeapObjectTag);
+ Addu(code, code, Code::kHeaderSize - kHeapObjectTag);
+ Jump(code);
}
// Continue here if InvokePrologue does handle the invocation due to
// mismatched parameter counts.
@@ -4105,14 +4197,6 @@ void MacroAssembler::InvokeFunction(Register function,
InvokeFunctionCode(a1, no_reg, expected, actual, flag);
}
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag) {
- li(a1, function);
- InvokeFunction(a1, expected, actual, flag);
-}
-
// ---------------------------------------------------------------------------
// Support functions.
@@ -4467,6 +4551,12 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bd);
}
+void MacroAssembler::JumpToInstructionStream(const InstructionStream* stream) {
+ int32_t bytes_address = reinterpret_cast<int32_t>(stream->bytes());
+ li(kOffHeapTrampolineRegister, Operand(bytes_address, RelocInfo::NONE));
+ Jump(kOffHeapTrampolineRegister);
+}
+
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK_GT(value, 0);
@@ -5274,6 +5364,26 @@ bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
return n_of_valid_regs != n_of_non_aliasing_regs;
}
+void TurboAssembler::ComputeCodeStartAddress(Register dst) {
+ // This push on ra and the pop below together ensure that we restore the
+ // register ra, which is needed while computing the code start address.
+ push(ra);
+
+ // The bal instruction puts the address of the current instruction into
+ // the return address (ra) register, which we can use later on.
+ Label current;
+ bal(&current);
+ nop();
+ int pc = pc_offset();
+ bind(&current);
+ li(dst, pc);
+ subu(dst, ra, dst);
+
+ pop(ra); // Restore ra
+}
+
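The arithmetic behind the sequence above: bal leaves the absolute address of the label in ra, and subtracting the label's known offset from the start of the generated code recovers the start address. A trivial check with assumed example values; illustration only, not part of the patch:

// Illustration only, not V8 code.
#include <cstdint>
#include <cstdio>

int main() {
  uintptr_t code_start = 0x20000000;           // assumed start of generated code
  int pc = 16;                                 // assumed pc_offset() of the label
  uintptr_t ra = code_start + pc;              // what bal deposits in ra
  printf("%#lx\n", (unsigned long)(ra - pc));  // prints 0x20000000
  return 0;
}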
+void TurboAssembler::ResetSpeculationPoisonRegister() { UNREACHABLE(); }
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 8c70eb54a3..37d2c59270 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -19,12 +19,15 @@ constexpr Register kReturnRegister2 = a0;
constexpr Register kJSFunctionRegister = a1;
constexpr Register kContextRegister = s7;
constexpr Register kAllocateSizeRegister = a0;
+constexpr Register kSpeculationPoisonRegister = t3;
constexpr Register kInterpreterAccumulatorRegister = v0;
constexpr Register kInterpreterBytecodeOffsetRegister = t4;
constexpr Register kInterpreterBytecodeArrayRegister = t5;
constexpr Register kInterpreterDispatchTableRegister = t6;
constexpr Register kJavaScriptCallArgCountRegister = a0;
+constexpr Register kJavaScriptCallCodeStartRegister = a2;
constexpr Register kJavaScriptCallNewTargetRegister = a3;
+constexpr Register kOffHeapTrampolineRegister = at;
constexpr Register kRuntimeCallFunctionRegister = a1;
constexpr Register kRuntimeCallArgCountRegister = a0;
@@ -559,6 +562,8 @@ class TurboAssembler : public Assembler {
void Movf(Register rd, Register rs, uint16_t cc = 0);
void Clz(Register rd, Register rs);
+ void Ctz(Register rd, Register rs);
+ void Popcnt(Register rd, Register rs);
// Int64Lowering instructions
void AddPair(Register dst_low, Register dst_high, Register left_low,
@@ -731,8 +736,10 @@ class TurboAssembler : public Assembler {
Mthc1(src_high, dst);
}
- void Move(FPURegister dst, float imm);
- void Move(FPURegister dst, double imm);
+ void Move(FPURegister dst, float imm) { Move(dst, bit_cast<uint32_t>(imm)); }
+ void Move(FPURegister dst, double imm) { Move(dst, bit_cast<uint64_t>(imm)); }
+ void Move(FPURegister dst, uint32_t src);
+ void Move(FPURegister dst, uint64_t src);
// -------------------------------------------------------------------------
// Overflow handling functions.
@@ -844,6 +851,12 @@ class TurboAssembler : public Assembler {
BranchF64(bd, target, nan, cc, cmp1, cmp2);
}
+ // Compute the start of the generated instruction stream from the current PC.
+ // This is an alternative to embedding the {CodeObject} handle as a reference.
+ void ComputeCodeStartAddress(Register dst);
+
+ void ResetSpeculationPoisonRegister();
+
protected:
void BranchLong(Label* L, BranchDelaySlot bdslot);
@@ -1023,10 +1036,6 @@ class MacroAssembler : public TurboAssembler {
void InvokeFunction(Register function, const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
- void InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag);
-
// Frame restart support.
void MaybeDropFrames();
@@ -1089,6 +1098,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
BranchDelaySlot bd = PROTECT,
bool builtin_exit_frame = false);
+ // Generates a trampoline to jump to the off-heap instruction stream.
+ void JumpToInstructionStream(const InstructionStream* stream);
+
// -------------------------------------------------------------------------
// StatsCounter support.
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 4994418ef5..b55273eba5 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -783,8 +783,7 @@ void MipsDebugger::Debug() {
#undef XSTR
}
-
-static bool ICacheMatch(void* one, void* two) {
+bool Simulator::ICacheMatch(void* one, void* two) {
DCHECK_EQ(reinterpret_cast<intptr_t>(one) & CachePage::kPageMask, 0);
DCHECK_EQ(reinterpret_cast<intptr_t>(two) & CachePage::kPageMask, 0);
return one == two;
@@ -883,11 +882,6 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
- i_cache_ = isolate_->simulator_i_cache();
- if (i_cache_ == nullptr) {
- i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
- isolate_->set_simulator_i_cache(i_cache_);
- }
// Set up simulator support first. Some of this information is needed to
// setup the architecture state.
stack_ = reinterpret_cast<char*>(malloc(stack_size_));
@@ -2539,8 +2533,7 @@ void Simulator::PrintStopInfo(uint32_t code) {
void Simulator::SignalException(Exception e) {
- V8_Fatal(__FILE__, __LINE__, "Error: Exception %i raised.",
- static_cast<int>(e));
+ FATAL("Error: Exception %i raised.", static_cast<int>(e));
}
// Min/Max template functions for Double and Single arguments.
@@ -5690,7 +5683,8 @@ void Simulator::DecodeTypeMsa3RF() {
case MSUB_Q:
case MADDR_Q:
case MSUBR_Q:
- get_msa_register(wd_reg(), &wd); // fall-through
+ get_msa_register(wd_reg(), &wd);
+ V8_FALLTHROUGH;
case MUL_Q:
case MULR_Q:
switch (DecodeMsaDataFormat()) {
@@ -6912,7 +6906,7 @@ void Simulator::DecodeTypeJump() {
// Executes the current instruction.
void Simulator::InstructionDecode(Instruction* instr) {
if (v8::internal::FLAG_check_icache) {
- CheckICache(isolate_->simulator_i_cache(), instr);
+ CheckICache(i_cache(), instr);
}
pc_modified_ = false;
v8::internal::EmbeddedVector<char, 256> buffer;
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index 0c417becd5..ffd2c46740 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -258,6 +258,7 @@ class Simulator : public SimulatorBase {
static void SetRedirectInstruction(Instruction* instruction);
// ICache checking.
+ static bool ICacheMatch(void* one, void* two);
static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
size_t size);
@@ -450,10 +451,10 @@ class Simulator : public SimulatorBase {
Instruction* instr_after_compact_branch =
reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
if (instr_after_compact_branch->IsForbiddenAfterBranch()) {
- V8_Fatal(__FILE__, __LINE__,
- "Error: Unexpected instruction 0x%08x immediately after a "
- "compact branch instruction.",
- *reinterpret_cast<uint32_t*>(instr_after_compact_branch));
+ FATAL(
+ "Error: Unexpected instruction 0x%08x immediately after a "
+ "compact branch instruction.",
+ *reinterpret_cast<uint32_t*>(instr_after_compact_branch));
}
}
@@ -480,9 +481,8 @@ class Simulator : public SimulatorBase {
}
if (instr->IsForbiddenInBranchDelay()) {
- V8_Fatal(__FILE__, __LINE__,
- "Eror:Unexpected %i opcode in a branch delay slot.",
- instr->OpcodeValue());
+ FATAL("Eror:Unexpected %i opcode in a branch delay slot.",
+ instr->OpcodeValue());
}
InstructionDecode(instr);
SNPrintF(trace_buf_, " ");
@@ -538,9 +538,6 @@ class Simulator : public SimulatorBase {
// Debugger input.
char* last_debugger_input_;
- // Icache simulation.
- base::CustomMatcherHashMap* i_cache_;
-
v8::internal::Isolate* isolate_;
// Registered breakpoints.
diff --git a/deps/v8/src/mips64/OWNERS b/deps/v8/src/mips64/OWNERS
index 3fce7dd688..4ce9d7f91d 100644
--- a/deps/v8/src/mips64/OWNERS
+++ b/deps/v8/src/mips64/OWNERS
@@ -1,2 +1,3 @@
ivica.bogosavljevic@mips.com
-Miran.Karic@mips.com
\ No newline at end of file
+Miran.Karic@mips.com
+sreten.kovacevic@mips.com
\ No newline at end of file
diff --git a/deps/v8/src/mips64/assembler-mips64-inl.h b/deps/v8/src/mips64/assembler-mips64-inl.h
index ded3da224c..e05082ee40 100644
--- a/deps/v8/src/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/mips64/assembler-mips64-inl.h
@@ -33,9 +33,8 @@
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
-
-#ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
-#define V8_MIPS_ASSEMBLER_MIPS_INL_H_
+#ifndef V8_MIPS64_ASSEMBLER_MIPS64_INL_H_
+#define V8_MIPS64_ASSEMBLER_MIPS64_INL_H_
#include "src/mips64/assembler-mips64.h"
@@ -46,8 +45,7 @@
namespace v8 {
namespace internal {
-
-bool CpuFeatures::SupportsCrankshaft() { return IsSupported(FPU); }
+bool CpuFeatures::SupportsOptimizer() { return IsSupported(FPU); }
bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(MIPS_SIMD); }
@@ -121,9 +119,9 @@ Address Assembler::target_address_from_return_address(Address pc) {
}
void Assembler::deserialization_set_special_target_at(
- Isolate* isolate, Address instruction_payload, Code* code, Address target) {
+ Address instruction_payload, Code* code, Address target) {
set_target_address_at(
- isolate, instruction_payload - kInstructionsFor64BitConstant * kInstrSize,
+ instruction_payload - kInstructionsFor64BitConstant * kInstrSize,
code ? code->constant_pool() : nullptr, target);
}
@@ -144,9 +142,8 @@ void Assembler::set_target_internal_reference_encoded_at(Address pc,
// after complete deserialization, no need to flush on each reference.
}
-
void Assembler::deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
+ Address pc, Address target, RelocInfo::Mode mode) {
if (mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
DCHECK(IsJ(instr_at(pc)));
set_target_internal_reference_encoded_at(pc, target);
@@ -172,7 +169,7 @@ void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(target->GetIsolate(), pc_, constant_pool_,
+ Assembler::set_target_address_at(pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr &&
@@ -217,15 +214,15 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
return target_address();
}
-void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
+void RelocInfo::set_target_runtime_entry(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target)
- set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
}
-void RelocInfo::WipeOut(Isolate* isolate) {
+void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
@@ -234,12 +231,12 @@ void RelocInfo::WipeOut(Isolate* isolate) {
} else if (IsInternalReferenceEncoded(rmode_)) {
Assembler::set_target_internal_reference_encoded_at(pc_, nullptr);
} else {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr);
+ Assembler::set_target_address_at(pc_, constant_pool_, nullptr);
}
}
template <typename ObjectVisitor>
-void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);
@@ -343,4 +340,4 @@ EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
} // namespace internal
} // namespace v8
-#endif // V8_MIPS_ASSEMBLER_MIPS_INL_H_
+#endif // V8_MIPS64_ASSEMBLER_MIPS64_INL_H_
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index a056f66849..6c0bebebce 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -76,12 +76,21 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
#ifndef __mips__
// For the simulator build, use FPU.
supported_ |= 1u << FPU;
+#if defined(_MIPS_ARCH_MIPS64R6) && defined(_MIPS_MSA)
+ supported_ |= 1u << MIPS_SIMD;
+#endif
#else
// Probe for additional features at runtime.
base::CPU cpu;
if (cpu.has_fpu()) supported_ |= 1u << FPU;
+#if defined(_MIPS_ARCH_MIPS64R6)
+#if defined(_MIPS_MSA)
+ supported_ |= 1u << MIPS_SIMD;
+#else
if (cpu.has_msa()) supported_ |= 1u << MIPS_SIMD;
#endif
+#endif
+#endif
}
@@ -178,22 +187,20 @@ uint32_t RelocInfo::embedded_size() const {
(Assembler::target_address_at(pc_, constant_pool_))));
}
-void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
+void RelocInfo::set_embedded_address(Address address,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
- flush_mode);
+ Assembler::set_target_address_at(pc_, constant_pool_, address, flush_mode);
}
-void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
- ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_,
+void RelocInfo::set_embedded_size(uint32_t size, ICacheFlushMode flush_mode) {
+ Assembler::set_target_address_at(pc_, constant_pool_,
reinterpret_cast<Address>(size), flush_mode);
}
-void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
+void RelocInfo::set_js_to_wasm_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- set_embedded_address(isolate, address, icache_flush_mode);
+ set_embedded_address(address, icache_flush_mode);
}
Address RelocInfo::js_to_wasm_address() const {
@@ -251,8 +258,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
break;
}
Address pc = buffer_ + request.offset();
- set_target_value_at(isolate, pc,
- reinterpret_cast<uint64_t>(object.location()));
+ set_target_value_at(pc, reinterpret_cast<uint64_t>(object.location()));
}
}
@@ -2893,15 +2899,6 @@ void Assembler::cfc1(Register rt, FPUControlRegister fs) {
}
-void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
- uint64_t i;
- memcpy(&i, &d, 8);
-
- *lo = i & 0xFFFFFFFF;
- *hi = i >> 32;
-}
-
-
void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft) {
DCHECK_EQ(kArchVariant, kMips64r6);
@@ -4217,8 +4214,7 @@ void Assembler::QuietNaN(HeapObject* object) {
// There is an optimization below, which emits a nop when the address
// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
// and possibly removed.
-void Assembler::set_target_value_at(Isolate* isolate, Address pc,
- uint64_t target,
+void Assembler::set_target_value_at(Address pc, uint64_t target,
ICacheFlushMode icache_flush_mode) {
// There is an optimization where only 4 instructions are used to load address
// in code on MIP64 because only 48-bits of address is effectively used.
@@ -4249,7 +4245,7 @@ void Assembler::set_target_value_at(Isolate* isolate, Address pc,
(target & kImm16Mask);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, pc, 4 * Assembler::kInstrSize);
+ Assembler::FlushICache(pc, 4 * Assembler::kInstrSize);
}
}
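On MIPS64 only the low 48 bits of the address are significant here, which is why four instructions suffice; assuming the usual lui/ori/dsll/ori materialization, the value splits into three 16-bit immediates. A minimal sketch, illustration only and not part of the patch:

// Illustration only, not V8 code.
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t target = 0x0000123456789ABCull;      // only the low 48 bits matter
  uint16_t imm47_32 = (target >> 32) & 0xFFFF;  // lui
  uint16_t imm31_16 = (target >> 16) & 0xFFFF;  // ori, then dsll by 16
  uint16_t imm15_0 = target & 0xFFFF;           // final ori
  uint64_t rebuilt = ((uint64_t)imm47_32 << 32) |
                     ((uint64_t)imm31_16 << 16) | imm15_0;
  printf("%d\n", rebuilt == target);            // prints 1
  return 0;
}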
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index 3530c7e7b2..9f1fe59de8 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -32,9 +32,8 @@
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
-
-#ifndef V8_MIPS_ASSEMBLER_MIPS_H_
-#define V8_MIPS_ASSEMBLER_MIPS_H_
+#ifndef V8_MIPS64_ASSEMBLER_MIPS64_H_
+#define V8_MIPS64_ASSEMBLER_MIPS64_H_
#include <stdio.h>
@@ -396,7 +395,7 @@ class Operand BASE_EMBEDDED {
public:
// Immediate.
INLINE(explicit Operand(int64_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE64))
+ RelocInfo::Mode rmode = RelocInfo::NONE))
: rm_(no_reg), rmode_(rmode) {
value_.immediate = immediate;
}
@@ -408,8 +407,7 @@ class Operand BASE_EMBEDDED {
INLINE(explicit Operand(Object** opp));
INLINE(explicit Operand(Context** cpp));
explicit Operand(Handle<HeapObject> handle);
- INLINE(explicit Operand(Smi* value))
- : rm_(no_reg), rmode_(RelocInfo::NONE32) {
+ INLINE(explicit Operand(Smi* value)) : rm_(no_reg), rmode_(RelocInfo::NONE) {
value_.immediate = reinterpret_cast<intptr_t>(value);
}
@@ -577,9 +575,9 @@ class Assembler : public AssemblerBase {
// The isolate argument is unused (and may be nullptr) when skipping flushing.
static Address target_address_at(Address pc);
INLINE(static void set_target_address_at(
- Isolate* isolate, Address pc, Address target,
+ Address pc, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
- set_target_value_at(isolate, pc, reinterpret_cast<uint64_t>(target),
+ set_target_value_at(pc, reinterpret_cast<uint64_t>(target),
icache_flush_mode);
}
// On MIPS there is no Constant Pool so we skip that parameter.
@@ -587,13 +585,13 @@ class Assembler : public AssemblerBase {
return target_address_at(pc);
}
INLINE(static void set_target_address_at(
- Isolate* isolate, Address pc, Address constant_pool, Address target,
+ Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
- set_target_address_at(isolate, pc, target, icache_flush_mode);
+ set_target_address_at(pc, target, icache_flush_mode);
}
static void set_target_value_at(
- Isolate* isolate, Address pc, uint64_t target,
+ Address pc, uint64_t target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address
@@ -608,12 +606,11 @@ class Assembler : public AssemblerBase {
// This is for calls and branches within generated code. The serializer
// has already deserialized the lui/ori instructions etc.
inline static void deserialization_set_special_target_at(
- Isolate* isolate, Address instruction_payload, Code* code,
- Address target);
+ Address instruction_payload, Code* code, Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target,
+ Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// Size of an instruction.
@@ -1963,8 +1960,6 @@ class Assembler : public AssemblerBase {
return internal_trampoline_exception_;
}
- void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi);
-
bool is_trampoline_emitted() const {
return trampoline_emitted_;
}
@@ -2300,4 +2295,4 @@ class UseScratchRegisterScope {
} // namespace internal
} // namespace v8
-#endif // V8_ARM_ASSEMBLER_MIPS_H_
+#endif // V8_MIPS64_ASSEMBLER_MIPS64_H_
diff --git a/deps/v8/src/mips64/code-stubs-mips64.h b/deps/v8/src/mips64/code-stubs-mips64.h
index 0513611664..f5d20d8c2b 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.h
+++ b/deps/v8/src/mips64/code-stubs-mips64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS_CODE_STUBS_MIPS64_H_
-#define V8_MIPS_CODE_STUBS_MIPS64_H_
+#ifndef V8_MIPS64_CODE_STUBS_MIPS64_H_
+#define V8_MIPS64_CODE_STUBS_MIPS64_H_
namespace v8 {
namespace internal {
@@ -28,4 +28,4 @@ class DirectCEntryStub : public PlatformCodeStub {
} // namespace internal
} // namespace v8
-#endif // V8_MIPS_CODE_STUBS_MIPS64_H_
+#endif // V8_MIPS64_CODE_STUBS_MIPS64_H_
diff --git a/deps/v8/src/mips64/codegen-mips64.cc b/deps/v8/src/mips64/codegen-mips64.cc
index 3be5e504bb..7b2fbd78a5 100644
--- a/deps/v8/src/mips64/codegen-mips64.cc
+++ b/deps/v8/src/mips64/codegen-mips64.cc
@@ -542,9 +542,9 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
}
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
+ DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICache(isolate, buffer, allocated);
+ Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
@@ -569,9 +569,9 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
+ DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICache(isolate, buffer, allocated);
+ Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
diff --git a/deps/v8/src/mips64/constants-mips64.h b/deps/v8/src/mips64/constants-mips64.h
index 0c107d1e1b..e89c4a5df3 100644
--- a/deps/v8/src/mips64/constants-mips64.h
+++ b/deps/v8/src/mips64/constants-mips64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS_CONSTANTS_H_
-#define V8_MIPS_CONSTANTS_H_
+#ifndef V8_MIPS64_CONSTANTS_MIPS64_H_
+#define V8_MIPS64_CONSTANTS_MIPS64_H_
#include "src/base/logging.h"
#include "src/base/macros.h"
@@ -2013,4 +2013,4 @@ bool InstructionGetters<T>::IsForbiddenAfterBranchInstr(Instr instr) {
} // namespace internal
} // namespace v8
-#endif // #ifndef V8_MIPS_CONSTANTS_H_
+#endif // V8_MIPS64_CONSTANTS_MIPS64_H_
diff --git a/deps/v8/src/mips64/cpu-mips64.cc b/deps/v8/src/mips64/cpu-mips64.cc
index ab9cf69620..cc8ecdbd5a 100644
--- a/deps/v8/src/mips64/cpu-mips64.cc
+++ b/deps/v8/src/mips64/cpu-mips64.cc
@@ -38,9 +38,7 @@ void CpuFeatures::FlushICache(void* start, size_t size) {
long res; // NOLINT(runtime/int)
// See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
res = syscall(__NR_cacheflush, start, size, ICACHE);
- if (res) {
- V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");
- }
+ if (res) FATAL("Failed to flush the instruction cache");
#endif // ANDROID
#endif // !USE_SIMULATOR.
}
diff --git a/deps/v8/src/mips64/disasm-mips64.cc b/deps/v8/src/mips64/disasm-mips64.cc
index d53b47d0c6..8f77a68b21 100644
--- a/deps/v8/src/mips64/disasm-mips64.cc
+++ b/deps/v8/src/mips64/disasm-mips64.cc
@@ -958,6 +958,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
PrintMsaImmElm(instr);
return 4;
}
+ UNREACHABLE();
}
case 'r': { // 'r: registers.
return FormatRegister(instr, format);
diff --git a/deps/v8/src/mips64/frame-constants-mips64.h b/deps/v8/src/mips64/frame-constants-mips64.h
index 344453794a..9c7455bcc5 100644
--- a/deps/v8/src/mips64/frame-constants-mips64.h
+++ b/deps/v8/src/mips64/frame-constants-mips64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS_FRAMES_MIPS_H_
-#define V8_MIPS_FRAMES_MIPS_H_
+#ifndef V8_MIPS64_FRAME_CONSTANTS_MIPS64_H_
+#define V8_MIPS64_FRAME_CONSTANTS_MIPS64_H_
namespace v8 {
namespace internal {
@@ -49,4 +49,4 @@ class JavaScriptFrameConstants : public AllStatic {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_MIPS64_FRAME_CONSTANTS_MIPS64_H_
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/mips64/interface-descriptors-mips64.cc
index 8bc04a0401..6aba359b78 100644
--- a/deps/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/mips64/interface-descriptors-mips64.cc
@@ -68,12 +68,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
-void FastNewClosureDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1, a2, a3};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index 841f4665cf..79f486b4bb 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -14,6 +14,7 @@
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
+#include "src/instruction-stream.h"
#include "src/mips64/assembler-mips64-inl.h"
#include "src/mips64/macro-assembler-mips64.h"
#include "src/register-configuration.h"
@@ -2703,39 +2704,38 @@ void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) {
mthc1(scratch, dst);
}
-void TurboAssembler::Move(FPURegister dst, float imm) {
+void TurboAssembler::Move(FPURegister dst, uint32_t src) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- li(scratch, Operand(bit_cast<int32_t>(imm)));
+ li(scratch, Operand(static_cast<int32_t>(src)));
mtc1(scratch, dst);
}
-void TurboAssembler::Move(FPURegister dst, double imm) {
- int64_t imm_bits = bit_cast<int64_t>(imm);
+void TurboAssembler::Move(FPURegister dst, uint64_t src) {
// Handle special values first.
- if (imm_bits == bit_cast<int64_t>(0.0) && has_double_zero_reg_set_) {
+ if (src == bit_cast<uint64_t>(0.0) && has_double_zero_reg_set_) {
mov_d(dst, kDoubleRegZero);
- } else if (imm_bits == bit_cast<int64_t>(-0.0) && has_double_zero_reg_set_) {
+ } else if (src == bit_cast<uint64_t>(-0.0) && has_double_zero_reg_set_) {
Neg_d(dst, kDoubleRegZero);
} else {
- uint32_t lo, hi;
- DoubleAsTwoUInt32(imm, &lo, &hi);
- // Move the low part of the double into the lower bits of the corresponding
- // FPU register.
+ uint32_t lo = src & 0xFFFFFFFF;
+ uint32_t hi = src >> 32;
+ // Move the low part of the double into the lower of the corresponding FPU
+ // register of FPU register pair.
if (lo != 0) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- li(scratch, lo);
+ li(scratch, Operand(lo));
mtc1(scratch, dst);
} else {
mtc1(zero_reg, dst);
}
- // Move the high part of the double into the high bits of the corresponding
- // FPU register.
+ // Move the high part of the double into the higher of the corresponding FPU
+ // register of FPU register pair.
if (hi != 0) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- li(scratch, hi);
+ li(scratch, Operand(hi));
mthc1(scratch, dst);
} else {
mthc1(zero_reg, dst);
@@ -2776,6 +2776,136 @@ void TurboAssembler::Movf(Register rd, Register rs, uint16_t cc) {
void TurboAssembler::Clz(Register rd, Register rs) { clz(rd, rs); }
+void TurboAssembler::Ctz(Register rd, Register rs) {
+ if (kArchVariant == kMips64r6) {
+ // We don't have an instruction to count the number of trailing zeroes.
+ // Start by flipping the bits end-for-end so we can count the number of
+ // leading zeroes instead.
+ rotr(rd, rs, 16);
+ wsbh(rd, rd);
+ bitswap(rd, rd);
+ Clz(rd, rd);
+ } else {
+ // Convert trailing zeroes to trailing ones, and bits to their left
+ // to zeroes.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Daddu(scratch, rs, -1);
+ Xor(rd, scratch, rs);
+ And(rd, rd, scratch);
+ // Count number of leading zeroes.
+ Clz(rd, rd);
+ // Subtract number of leading zeroes from 32 to get number of trailing
+ // ones. Remember that the trailing ones were formerly trailing zeroes.
+ li(scratch, 32);
+ Subu(rd, scratch, rd);
+ }
+}
+
+void TurboAssembler::Dctz(Register rd, Register rs) {
+ if (kArchVariant == kMips64r6) {
+ // We don't have an instruction to count the number of trailing zeroes.
+ // Start by flipping the bits end-for-end so we can count the number of
+ // leading zeroes instead.
+ dsbh(rd, rs);
+ dshd(rd, rd);
+ dbitswap(rd, rd);
+ dclz(rd, rd);
+ } else {
+ // Convert trailing zeroes to trailing ones, and bits to their left
+ // to zeroes.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Daddu(scratch, rs, -1);
+ Xor(rd, scratch, rs);
+ And(rd, rd, scratch);
+ // Count number of leading zeroes.
+ dclz(rd, rd);
+ // Subtract number of leading zeroes from 64 to get number of trailing
+ // ones. Remember that the trailing ones were formerly trailing zeroes.
+ li(scratch, 64);
+ Dsubu(rd, scratch, rd);
+ }
+}
+
+void TurboAssembler::Popcnt(Register rd, Register rs) {
+ // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
+ //
+ // A generalization of the best bit counting method to integers of
+ // bit-widths up to 128 (parameterized by type T) is this:
+ //
+ // v = v - ((v >> 1) & (T)~(T)0/3); // temp
+ // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3); // temp
+ // v = (v + (v >> 4)) & (T)~(T)0/255*15; // temp
+ // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; //count
+ //
+ // For comparison, for 32-bit quantities, this algorithm can be executed
+ // using 20 MIPS instructions (the calls to LoadConst32() generate two
+ // machine instructions each for the values being used in this algorithm).
+ // A(n unrolled) loop-based algorithm requires 25 instructions.
+ //
+ // For a 64-bit operand this can be performed in 24 instructions compared
+ // to a(n unrolled) loop based algorithm which requires 38 instructions.
+ //
+ // There are algorithms which are faster in the cases where very few
+ // bits are set but the algorithm here attempts to minimize the total
+ // number of instructions executed even when a large number of bits
+ // are set.
+ uint32_t B0 = 0x55555555; // (T)~(T)0/3
+ uint32_t B1 = 0x33333333; // (T)~(T)0/15*3
+ uint32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15
+ uint32_t value = 0x01010101; // (T)~(T)0/255
+ uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
+
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = t8;
+ srl(scratch, rs, 1);
+ li(scratch2, B0);
+ And(scratch, scratch, scratch2);
+ Subu(scratch, rs, scratch);
+ li(scratch2, B1);
+ And(rd, scratch, scratch2);
+ srl(scratch, scratch, 2);
+ And(scratch, scratch, scratch2);
+ Addu(scratch, rd, scratch);
+ srl(rd, scratch, 4);
+ Addu(rd, rd, scratch);
+ li(scratch2, B2);
+ And(rd, rd, scratch2);
+ li(scratch, value);
+ Mul(rd, rd, scratch);
+ srl(rd, rd, shift);
+}
+
+void TurboAssembler::Dpopcnt(Register rd, Register rs) {
+ uint64_t B0 = 0x5555555555555555l; // (T)~(T)0/3
+ uint64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3
+ uint64_t B2 = 0x0F0F0F0F0F0F0F0Fl; // (T)~(T)0/255*15
+ uint64_t value = 0x0101010101010101l; // (T)~(T)0/255
+ uint64_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
+
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = t8;
+ dsrl(scratch, rs, 1);
+ li(scratch2, B0);
+ And(scratch, scratch, scratch2);
+ Dsubu(scratch, rs, scratch);
+ li(scratch2, B1);
+ And(rd, scratch, scratch2);
+ dsrl(scratch, scratch, 2);
+ And(scratch, scratch, scratch2);
+ Daddu(scratch, rd, scratch);
+ dsrl(rd, scratch, 4);
+ Daddu(rd, rd, scratch);
+ li(scratch2, B2);
+ And(rd, rd, scratch2);
+ li(scratch, value);
+ Dmul(rd, rd, scratch);
+ dsrl32(rd, rd, shift);
+}
+
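Dpopcnt applies the same SWAR steps with 64-bit masks; the final shift is (sizeof(T) - 1) * 8 = 56, which the macro expresses as dsrl32(rd, rd, 24), i.e. 32 + 24. A minimal C++ check of the 64-bit variant; illustration only, not part of the patch:

// Illustration only, not V8 code.
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t v = 0xF0F0000300000001ull;  // 11 bits set
  v = v - ((v >> 1) & 0x5555555555555555ull);
  v = (v & 0x3333333333333333ull) + ((v >> 2) & 0x3333333333333333ull);
  v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0Full;
  uint64_t count = (v * 0x0101010101010101ull) >> 56;    // dsrl32 by 24
  printf("popcount=%llu\n", (unsigned long long)count);  // prints popcount=11
  return 0;
}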
void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
Register result,
DoubleRegister double_input,
@@ -4267,12 +4397,28 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
- Label skip_hook;
+ Label skip_hook, call_hook;
+
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(isolate());
+ li(t0, Operand(debug_is_active));
+ Lb(t0, MemOperand(t0));
+ Branch(&skip_hook, eq, t0, Operand(zero_reg));
+
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
li(t0, Operand(debug_hook_active));
Lb(t0, MemOperand(t0));
+ Branch(&call_hook, ne, t0, Operand(zero_reg));
+
+ Ld(t0, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
+ Ld(t0, FieldMemOperand(t0, SharedFunctionInfo::kDebugInfoOffset));
+ JumpIfSmi(t0, &skip_hook);
+ Ld(t0, FieldMemOperand(t0, DebugInfo::kFlagsOffset));
+ And(t0, t0, Operand(Smi::FromInt(DebugInfo::kBreakAtEntry)));
Branch(&skip_hook, eq, t0, Operand(zero_reg));
+
+ bind(&call_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -4330,7 +4476,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
- Register code = t0;
+ Register code = kJavaScriptCallCodeStartRegister;
Ld(code, FieldMemOperand(function, JSFunction::kCodeOffset));
if (flag == CALL_FUNCTION) {
Daddu(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -4383,14 +4529,6 @@ void MacroAssembler::InvokeFunction(Register function,
InvokeFunctionCode(a1, no_reg, expected, actual, flag);
}
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag) {
- li(a1, function);
- InvokeFunction(a1, expected, actual, flag);
-}
-
// ---------------------------------------------------------------------------
// Support functions.
@@ -4723,6 +4861,12 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bd);
}
+void MacroAssembler::JumpToInstructionStream(const InstructionStream* stream) {
+ uint64_t bytes_address = reinterpret_cast<uint64_t>(stream->bytes());
+ li(kOffHeapTrampolineRegister, Operand(bytes_address, RelocInfo::NONE));
+ Jump(kOffHeapTrampolineRegister);
+}
+
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK_GT(value, 0);
@@ -5543,6 +5687,26 @@ bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
return n_of_valid_regs != n_of_non_aliasing_regs;
}
+void TurboAssembler::ComputeCodeStartAddress(Register dst) {
+ // This push on ra and the pop below together ensure that we restore the
+ // register ra, which is needed while computing the code start address.
+ push(ra);
+
+ // The bal instruction puts the address of the current instruction into
+ // the return address (ra) register, which we can use later on.
+ Label current;
+ bal(&current);
+ nop();
+ int pc = pc_offset();
+ bind(&current);
+ li(dst, Operand(pc));
+ Dsubu(dst, ra, dst);
+
+ pop(ra); // Restore ra
+}
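ComputeCodeStartAddress above relies on the MIPS bal instruction, which deposits the address of the instruction following its delay slot into ra; since pc_offset() taken at the bound label is that same position as a byte offset from the start of the code buffer, subtracting the two yields the code start. Conceptually (illustration only):

    // code start = absolute address of the label - offset of the label
    //              within the generated code.
    uintptr_t CodeStart(uintptr_t label_address, int label_offset_in_code) {
      return label_address - static_cast<uintptr_t>(label_offset_in_code);
    }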
+
+void TurboAssembler::ResetSpeculationPoisonRegister() { UNREACHABLE(); }
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index f89682d34c..f623f7f3cb 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
-#define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
+#ifndef V8_MIPS64_MACRO_ASSEMBLER_MIPS64_H_
+#define V8_MIPS64_MACRO_ASSEMBLER_MIPS64_H_
#include "src/assembler.h"
#include "src/globals.h"
@@ -19,12 +19,15 @@ constexpr Register kReturnRegister2 = a0;
constexpr Register kJSFunctionRegister = a1;
constexpr Register kContextRegister = s7;
constexpr Register kAllocateSizeRegister = a0;
+constexpr Register kSpeculationPoisonRegister = a7;
constexpr Register kInterpreterAccumulatorRegister = v0;
constexpr Register kInterpreterBytecodeOffsetRegister = t0;
constexpr Register kInterpreterBytecodeArrayRegister = t1;
constexpr Register kInterpreterDispatchTableRegister = t2;
constexpr Register kJavaScriptCallArgCountRegister = a0;
+constexpr Register kJavaScriptCallCodeStartRegister = a2;
constexpr Register kJavaScriptCallNewTargetRegister = a3;
+constexpr Register kOffHeapTrampolineRegister = at;
constexpr Register kRuntimeCallFunctionRegister = a1;
constexpr Register kRuntimeCallArgCountRegister = a0;
@@ -601,6 +604,10 @@ class TurboAssembler : public Assembler {
void Movf(Register rd, Register rs, uint16_t cc = 0);
void Clz(Register rd, Register rs);
+ void Ctz(Register rd, Register rs);
+ void Dctz(Register rd, Register rs);
+ void Popcnt(Register rd, Register rs);
+ void Dpopcnt(Register rd, Register rs);
// MIPS64 R2 instruction macro.
void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
@@ -744,8 +751,10 @@ class TurboAssembler : public Assembler {
}
}
- void Move(FPURegister dst, float imm);
- void Move(FPURegister dst, double imm);
+ void Move(FPURegister dst, float imm) { Move(dst, bit_cast<uint32_t>(imm)); }
+ void Move(FPURegister dst, double imm) { Move(dst, bit_cast<uint64_t>(imm)); }
+ void Move(FPURegister dst, uint32_t src);
+ void Move(FPURegister dst, uint64_t src);
inline void MulBranchOvf(Register dst, Register left, const Operand& right,
Label* overflow_label, Register scratch = at) {
@@ -875,6 +884,12 @@ class TurboAssembler : public Assembler {
void Dlsa(Register rd, Register rs, Register rt, uint8_t sa,
Register scratch = at);
+ // Compute the start of the generated instruction stream from the current PC.
+ // This is an alternative to embedding the {CodeObject} handle as a reference.
+ void ComputeCodeStartAddress(Register dst);
+
+ void ResetSpeculationPoisonRegister();
+
protected:
inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
@@ -1091,10 +1106,6 @@ class MacroAssembler : public TurboAssembler {
void InvokeFunction(Register function, const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
- void InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag);
-
// Frame restart support.
void MaybeDropFrames();
@@ -1156,6 +1167,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
BranchDelaySlot bd = PROTECT,
bool builtin_exit_frame = false);
+ // Generates a trampoline to jump to the off-heap instruction stream.
+ void JumpToInstructionStream(const InstructionStream* stream);
+
// -------------------------------------------------------------------------
// StatsCounter support.
@@ -1301,4 +1315,4 @@ void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
} // namespace internal
} // namespace v8
-#endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
+#endif // V8_MIPS64_MACRO_ASSEMBLER_MIPS64_H_
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/mips64/simulator-mips64.cc
index ebb8a76ad7..9177f8e6aa 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/mips64/simulator-mips64.cc
@@ -715,8 +715,7 @@ void MipsDebugger::Debug() {
#undef XSTR
}
-
-static bool ICacheMatch(void* one, void* two) {
+bool Simulator::ICacheMatch(void* one, void* two) {
DCHECK_EQ(reinterpret_cast<intptr_t>(one) & CachePage::kPageMask, 0);
DCHECK_EQ(reinterpret_cast<intptr_t>(two) & CachePage::kPageMask, 0);
return one == two;
@@ -814,11 +813,6 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
- i_cache_ = isolate_->simulator_i_cache();
- if (i_cache_ == nullptr) {
- i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
- isolate_->set_simulator_i_cache(i_cache_);
- }
// Set up simulator support first. Some of this information is needed to
// setup the architecture state.
stack_size_ = FLAG_sim_stack_size * KB;
@@ -2484,8 +2478,7 @@ void Simulator::PrintStopInfo(uint64_t code) {
void Simulator::SignalException(Exception e) {
- V8_Fatal(__FILE__, __LINE__, "Error: Exception %i raised.",
- static_cast<int>(e));
+ FATAL("Error: Exception %i raised.", static_cast<int>(e));
}
// Min/Max template functions for Double and Single arguments.
@@ -5914,7 +5907,8 @@ void Simulator::DecodeTypeMsa3RF() {
case MSUB_Q:
case MADDR_Q:
case MSUBR_Q:
- get_msa_register(wd_reg(), &wd); // fall-through
+ get_msa_register(wd_reg(), &wd);
+ V8_FALLTHROUGH;
case MUL_Q:
case MULR_Q:
switch (DecodeMsaDataFormat()) {
@@ -7260,7 +7254,7 @@ void Simulator::DecodeTypeJump() {
// Executes the current instruction.
void Simulator::InstructionDecode(Instruction* instr) {
if (v8::internal::FLAG_check_icache) {
- CheckICache(isolate_->simulator_i_cache(), instr);
+ CheckICache(i_cache(), instr);
}
pc_modified_ = false;
diff --git a/deps/v8/src/mips64/simulator-mips64.h b/deps/v8/src/mips64/simulator-mips64.h
index c4292236b0..115dde2103 100644
--- a/deps/v8/src/mips64/simulator-mips64.h
+++ b/deps/v8/src/mips64/simulator-mips64.h
@@ -9,8 +9,8 @@
// which will start execution in the Simulator or forwards to the real entry
// on a MIPS HW platform.
-#ifndef V8_MIPS_SIMULATOR_MIPS_H_
-#define V8_MIPS_SIMULATOR_MIPS_H_
+#ifndef V8_MIPS64_SIMULATOR_MIPS64_H_
+#define V8_MIPS64_SIMULATOR_MIPS64_H_
#include "src/allocation.h"
#include "src/mips64/constants-mips64.h"
@@ -260,6 +260,7 @@ class Simulator : public SimulatorBase {
static void SetRedirectInstruction(Instruction* instruction);
// ICache checking.
+ static bool ICacheMatch(void* one, void* two);
static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
size_t size);
@@ -472,10 +473,10 @@ class Simulator : public SimulatorBase {
Instruction* instr_after_compact_branch =
reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
if (instr_after_compact_branch->IsForbiddenAfterBranch()) {
- V8_Fatal(__FILE__, __LINE__,
- "Error: Unexpected instruction 0x%08x immediately after a "
- "compact branch instruction.",
- *reinterpret_cast<uint32_t*>(instr_after_compact_branch));
+ FATAL(
+ "Error: Unexpected instruction 0x%08x immediately after a "
+ "compact branch instruction.",
+ *reinterpret_cast<uint32_t*>(instr_after_compact_branch));
}
}
@@ -502,9 +503,8 @@ class Simulator : public SimulatorBase {
}
if (instr->IsForbiddenAfterBranch()) {
- V8_Fatal(__FILE__, __LINE__,
- "Eror:Unexpected %i opcode in a branch delay slot.",
- instr->OpcodeValue());
+      FATAL("Error: Unexpected %i opcode in a branch delay slot.",
+ instr->OpcodeValue());
}
InstructionDecode(instr);
SNPrintF(trace_buf_, " ");
@@ -559,9 +559,6 @@ class Simulator : public SimulatorBase {
// Debugger input.
char* last_debugger_input_;
- // Icache simulation.
- base::CustomMatcherHashMap* i_cache_;
-
v8::internal::Isolate* isolate_;
// Registered breakpoints.
@@ -586,4 +583,4 @@ class Simulator : public SimulatorBase {
} // namespace v8
#endif // defined(USE_SIMULATOR)
-#endif // V8_MIPS_SIMULATOR_MIPS_H_
+#endif // V8_MIPS64_SIMULATOR_MIPS64_H_
diff --git a/deps/v8/src/objects-body-descriptors-inl.h b/deps/v8/src/objects-body-descriptors-inl.h
index bd391d272b..a3e51e15e7 100644
--- a/deps/v8/src/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects-body-descriptors-inl.h
@@ -349,8 +349,7 @@ class ExternalTwoByteString::BodyDescriptor final : public BodyDescriptorBase {
class Code::BodyDescriptor final : public BodyDescriptorBase {
public:
- STATIC_ASSERT(kRelocationInfoOffset + kPointerSize == kHandlerTableOffset);
- STATIC_ASSERT(kHandlerTableOffset + kPointerSize ==
+ STATIC_ASSERT(kRelocationInfoOffset + kPointerSize ==
kDeoptimizationDataOffset);
STATIC_ASSERT(kDeoptimizationDataOffset + kPointerSize ==
kSourcePositionTableOffset);
@@ -378,9 +377,8 @@ class Code::BodyDescriptor final : public BodyDescriptorBase {
IteratePointers(obj, kRelocationInfoOffset, kDataStart, v);
RelocIterator it(Code::cast(obj), mode_mask);
- Isolate* isolate = obj->GetIsolate();
for (; !it.done(); it.next()) {
- it.rinfo()->Visit(isolate, v);
+ it.rinfo()->Visit(v);
}
}
@@ -451,6 +449,7 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
switch (type) {
case HASH_TABLE_TYPE:
case FIXED_ARRAY_TYPE:
+ case SCOPE_INFO_TYPE:
return Op::template apply<FixedArray::BodyDescriptor>(p1, p2, p3);
case FIXED_DOUBLE_ARRAY_TYPE:
return ReturnType();
@@ -460,6 +459,8 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
return Op::template apply<DescriptorArray::BodyDescriptor>(p1, p2, p3);
case TRANSITION_ARRAY_TYPE:
return Op::template apply<TransitionArray::BodyDescriptor>(p1, p2, p3);
+ case FEEDBACK_CELL_TYPE:
+ return Op::template apply<FeedbackCell::BodyDescriptor>(p1, p2, p3);
case FEEDBACK_VECTOR_TYPE:
return Op::template apply<FeedbackVector::BodyDescriptor>(p1, p2, p3);
case JS_OBJECT_TYPE:
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 142dbf6611..458b807f05 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -17,7 +17,9 @@
#include "src/objects/data-handler-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/literal-objects.h"
+#include "src/objects/microtask-inl.h"
#include "src/objects/module.h"
+#include "src/objects/promise-inl.h"
#include "src/ostreams.h"
#include "src/regexp/jsregexp.h"
#include "src/transitions.h"
@@ -25,6 +27,28 @@
namespace v8 {
namespace internal {
+// Heap Verification Overview
+// --------------------------
+// - Each InstanceType has a separate XXXVerify method which checks an object's
+// integrity in isolation.
+// - --verify-heap will iterate over all gc spaces and call ObjectVerify() on
+// every encountered tagged pointer.
+// - Verification should be pushed down to the specific instance type if its
+// integrity is independent of an outer object.
+// - In cases where the InstanceType is too generic (e.g. FixedArray) the
+//   outer object's XXXVerify method has to do recursive verification.
+// - If the corresponding objects have inheritance, the parent's Verify method
+//   is called as well.
+// - For any field containing pointers, VerifyPointer(...) should be called.
+//
+// Caveats
+// -------
+// - Assume that any of the verify methods is incomplete!
+// - Some integrity checks are only partially done due to objects being in
+//   partially initialized states when a GC happens, for instance when outer
+//   objects are allocated before inner ones.
+//
+
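As a concrete illustration of the conventions spelled out in the comment above, a per-type verifier typically takes the following shape (Foo/Bar and their fields are hypothetical names used purely for illustration):

    // Hypothetical example of the XXXVerify pattern; not part of this patch.
    void Foo::FooVerify() {
      CHECK(IsFoo());                    // instance-type check first
      ParentOfFoo::ParentOfFooVerify();  // verify the inherited part, if any
      VerifyHeapPointer(bar());          // every pointer field gets verified
      CHECK(bar()->IsBar() || bar()->IsUndefined(GetIsolate()));
    }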
#ifdef VERIFY_HEAP
void Object::ObjectVerify() {
@@ -65,12 +89,13 @@ void HeapObject::HeapObjectVerify() {
CHECK(map()->IsMap());
InstanceType instance_type = map()->instance_type();
- if (instance_type < FIRST_NONSTRING_TYPE) {
- String::cast(this)->StringVerify();
- return;
- }
switch (instance_type) {
+#define STRING_TYPE_CASE(TYPE, size, name, camel_name) case TYPE:
+ STRING_TYPE_LIST(STRING_TYPE_CASE)
+#undef STRING_TYPE_CASE
+ String::cast(this)->StringVerify();
+ break;
case SYMBOL_TYPE:
Symbol::cast(this)->SymbolVerify();
break;
@@ -86,6 +111,7 @@ void HeapObject::HeapObjectVerify() {
break;
case HASH_TABLE_TYPE:
case FIXED_ARRAY_TYPE:
+ case SCOPE_INFO_TYPE:
FixedArray::cast(this)->FixedArrayVerify();
break;
case FIXED_DOUBLE_ARRAY_TYPE:
@@ -109,6 +135,9 @@ void HeapObject::HeapObjectVerify() {
case FREE_SPACE_TYPE:
FreeSpace::cast(this)->FreeSpaceVerify();
break;
+ case FEEDBACK_CELL_TYPE:
+ FeedbackCell::cast(this)->FeedbackCellVerify();
+ break;
case FEEDBACK_VECTOR_TYPE:
FeedbackVector::cast(this)->FeedbackVectorVerify();
break;
@@ -267,10 +296,6 @@ void HeapObject::HeapObjectVerify() {
case STORE_HANDLER_TYPE:
StoreHandler::cast(this)->StoreHandlerVerify();
break;
-
- default:
- UNREACHABLE();
- break;
}
}
@@ -287,6 +312,7 @@ void Symbol::SymbolVerify() {
CHECK(HasHashCode());
CHECK_GT(Hash(), 0);
CHECK(name()->IsUndefined(GetIsolate()) || name()->IsString());
+ CHECK_IMPLIES(IsPrivateField(), IsPrivate());
}
@@ -315,6 +341,13 @@ void FreeSpace::FreeSpaceVerify() {
CHECK(IsFreeSpace());
}
+void FeedbackCell::FeedbackCellVerify() {
+ CHECK(IsFeedbackCell());
+ Isolate* const isolate = GetIsolate();
+ VerifyHeapPointer(value());
+ CHECK(value()->IsUndefined(isolate) || value()->IsFeedbackVector());
+}
+
void FeedbackVector::FeedbackVectorVerify() { CHECK(IsFeedbackVector()); }
template <class Traits>
@@ -742,6 +775,9 @@ void JSBoundFunction::JSBoundFunctionVerify() {
void JSFunction::JSFunctionVerify() {
CHECK(IsJSFunction());
+ JSObjectVerify();
+ VerifyHeapPointer(feedback_cell());
+ CHECK(feedback_cell()->IsFeedbackCell());
CHECK(code()->IsCode());
CHECK(map()->is_callable());
if (has_prototype_slot()) {
@@ -758,7 +794,6 @@ void SharedFunctionInfo::SharedFunctionInfoVerify() {
VerifyObjectField(kFeedbackMetadataOffset);
VerifyObjectField(kFunctionDataOffset);
VerifyObjectField(kFunctionIdentifierOffset);
- VerifyObjectField(kInstanceClassNameOffset);
VerifyObjectField(kNameOffset);
VerifyObjectField(kOuterScopeInfoOffset);
VerifyObjectField(kScopeInfoOffset);
@@ -1041,33 +1076,102 @@ void JSWeakSet::JSWeakSetVerify() {
CHECK(table()->IsHashTable() || table()->IsUndefined(GetIsolate()));
}
+void Microtask::MicrotaskVerify() { CHECK(IsMicrotask()); }
+
+void CallableTask::CallableTaskVerify() {
+ CHECK(IsCallableTask());
+ MicrotaskVerify();
+ VerifyHeapPointer(callable());
+ CHECK(callable()->IsCallable());
+ VerifyHeapPointer(context());
+ CHECK(context()->IsContext());
+}
+
+void CallbackTask::CallbackTaskVerify() {
+ CHECK(IsCallbackTask());
+ MicrotaskVerify();
+ VerifyHeapPointer(callback());
+ VerifyHeapPointer(data());
+}
+
+void PromiseReactionJobTask::PromiseReactionJobTaskVerify() {
+ CHECK(IsPromiseReactionJobTask());
+ MicrotaskVerify();
+ Isolate* isolate = GetIsolate();
+ VerifyPointer(argument());
+ VerifyHeapPointer(context());
+ CHECK(context()->IsContext());
+ VerifyHeapPointer(handler());
+ VerifyHeapPointer(payload());
+ if (handler()->IsCode()) {
+ CHECK(payload()->IsJSReceiver());
+ } else {
+ CHECK(handler()->IsUndefined(isolate) || handler()->IsCallable());
+ CHECK(payload()->IsJSPromise() || payload()->IsPromiseCapability());
+ }
+}
+
+void PromiseFulfillReactionJobTask::PromiseFulfillReactionJobTaskVerify() {
+ CHECK(IsPromiseFulfillReactionJobTask());
+ PromiseReactionJobTaskVerify();
+}
+
+void PromiseRejectReactionJobTask::PromiseRejectReactionJobTaskVerify() {
+ CHECK(IsPromiseRejectReactionJobTask());
+ PromiseReactionJobTaskVerify();
+}
+
+void PromiseResolveThenableJobTask::PromiseResolveThenableJobTaskVerify() {
+ CHECK(IsPromiseResolveThenableJobTask());
+ MicrotaskVerify();
+ VerifyHeapPointer(context());
+ CHECK(context()->IsContext());
+ VerifyHeapPointer(promise_to_resolve());
+ CHECK(promise_to_resolve()->IsJSPromise());
+ VerifyHeapPointer(then());
+ CHECK(then()->IsCallable());
+ CHECK(then()->IsJSReceiver());
+ VerifyHeapPointer(thenable());
+ CHECK(thenable()->IsJSReceiver());
+}
+
void PromiseCapability::PromiseCapabilityVerify() {
CHECK(IsPromiseCapability());
- VerifyPointer(promise());
+ Isolate* isolate = GetIsolate();
+ VerifyHeapPointer(promise());
+ CHECK(promise()->IsJSReceiver() || promise()->IsUndefined(isolate));
VerifyPointer(resolve());
VerifyPointer(reject());
}
+void PromiseReaction::PromiseReactionVerify() {
+ CHECK(IsPromiseReaction());
+ Isolate* isolate = GetIsolate();
+ VerifyPointer(next());
+ CHECK(next()->IsSmi() || next()->IsPromiseReaction());
+ VerifyHeapPointer(reject_handler());
+ VerifyHeapPointer(fulfill_handler());
+ VerifyHeapPointer(payload());
+ if (reject_handler()->IsCode()) {
+ CHECK(fulfill_handler()->IsCode());
+ CHECK(payload()->IsJSReceiver());
+ } else {
+ CHECK(reject_handler()->IsUndefined(isolate) ||
+ reject_handler()->IsCallable());
+ CHECK(fulfill_handler()->IsUndefined(isolate) ||
+ fulfill_handler()->IsCallable());
+ CHECK(payload()->IsJSPromise() || payload()->IsPromiseCapability());
+ }
+}
+
void JSPromise::JSPromiseVerify() {
CHECK(IsJSPromise());
JSObjectVerify();
- Isolate* isolate = GetIsolate();
- CHECK(result()->IsUndefined(isolate) || result()->IsObject());
- CHECK(deferred_promise()->IsUndefined(isolate) ||
- deferred_promise()->IsJSReceiver() ||
- deferred_promise()->IsFixedArray());
- CHECK(deferred_on_resolve()->IsUndefined(isolate) ||
- deferred_on_resolve()->IsCallable() ||
- deferred_on_resolve()->IsFixedArray());
- CHECK(deferred_on_reject()->IsUndefined(isolate) ||
- deferred_on_reject()->IsCallable() ||
- deferred_on_reject()->IsFixedArray());
- CHECK(fulfill_reactions()->IsUndefined(isolate) ||
- fulfill_reactions()->IsCallable() || fulfill_reactions()->IsSymbol() ||
- fulfill_reactions()->IsFixedArray());
- CHECK(reject_reactions()->IsUndefined(isolate) ||
- reject_reactions()->IsSymbol() || reject_reactions()->IsCallable() ||
- reject_reactions()->IsFixedArray());
+ VerifyPointer(reactions_or_result());
+ VerifySmiField(kFlagsOffset);
+ if (status() == Promise::kPending) {
+ CHECK(reactions()->IsSmi() || reactions()->IsPromiseReaction());
+ }
}
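The simplified JSPromiseVerify above reflects the new layout in which a single reactions_or_result slot is reinterpreted according to promise status: while the promise is pending it holds the reaction list (a Smi zero or a PromiseReaction chain), and once settled it holds the result value. Roughly, the accessors behave like this (a sketch for illustration, not the actual accessor definitions):

    // Sketch of how the shared slot is read; simplified for illustration.
    Object* JSPromise::reactions() {    // meaningful only while pending
      DCHECK_EQ(Promise::kPending, status());
      return reactions_or_result();
    }
    Object* JSPromise::result() {       // meaningful only once settled
      DCHECK_NE(Promise::kPending, status());
      return reactions_or_result();
    }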
template <typename Derived>
@@ -1214,33 +1318,6 @@ void Foreign::ForeignVerify() {
}
-void PromiseResolveThenableJobInfo::PromiseResolveThenableJobInfoVerify() {
- CHECK(IsPromiseResolveThenableJobInfo());
- CHECK(thenable()->IsJSReceiver());
- CHECK(then()->IsJSReceiver());
- CHECK(resolve()->IsJSFunction());
- CHECK(reject()->IsJSFunction());
- CHECK(context()->IsContext());
-}
-
-void PromiseReactionJobInfo::PromiseReactionJobInfoVerify() {
- Isolate* isolate = GetIsolate();
- CHECK(IsPromiseReactionJobInfo());
- CHECK(value()->IsObject());
- CHECK(tasks()->IsFixedArray() || tasks()->IsCallable() ||
- tasks()->IsSymbol());
- CHECK(deferred_promise()->IsUndefined(isolate) ||
- deferred_promise()->IsJSReceiver() ||
- deferred_promise()->IsFixedArray());
- CHECK(deferred_on_resolve()->IsUndefined(isolate) ||
- deferred_on_resolve()->IsCallable() ||
- deferred_on_resolve()->IsFixedArray());
- CHECK(deferred_on_reject()->IsUndefined(isolate) ||
- deferred_on_reject()->IsCallable() ||
- deferred_on_reject()->IsFixedArray());
- CHECK(context()->IsContext());
-}
-
void AsyncGeneratorRequest::AsyncGeneratorRequestVerify() {
CHECK(IsAsyncGeneratorRequest());
VerifySmiField(kResumeModeOffset);
@@ -1256,8 +1333,6 @@ void BigInt::BigIntVerify() {
CHECK(IsBigInt());
CHECK_GE(length(), 0);
CHECK_IMPLIES(is_zero(), !sign()); // There is no -0n.
- // TODO(neis): Somewhere check that MSD is non-zero. Doesn't hold during some
- // operations that allocate which is why we can't test it here.
}
void JSModuleNamespace::JSModuleNamespaceVerify() {
@@ -1298,7 +1373,7 @@ void Module::ModuleVerify() {
CHECK((status() >= kEvaluating && code()->IsModuleInfo()) ||
(status() == kInstantiated && code()->IsJSGeneratorObject()) ||
- (status() >= kInstantiating && code()->IsJSFunction()) ||
+ (status() == kInstantiating && code()->IsJSFunction()) ||
(code()->IsSharedFunctionInfo()));
CHECK_EQ(status() == kErrored, !exception()->IsTheHole(GetIsolate()));
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 1cbc2ca418..9c3ac94ab5 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -40,6 +40,7 @@
#include "src/objects/hash-table.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
+#include "src/objects/js-promise-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/literal-objects.h"
#include "src/objects/module-inl.h"
@@ -78,13 +79,14 @@ int PropertyDetails::field_width_in_words() const {
}
TYPE_CHECKER(BigInt, BIGINT_TYPE)
-TYPE_CHECKER(BreakPoint, TUPLE2_TYPE)
TYPE_CHECKER(BreakPointInfo, TUPLE2_TYPE)
+TYPE_CHECKER(BreakPoint, TUPLE2_TYPE)
TYPE_CHECKER(CallHandlerInfo, TUPLE3_TYPE)
TYPE_CHECKER(Cell, CELL_TYPE)
TYPE_CHECKER(ConstantElementsPair, TUPLE2_TYPE)
TYPE_CHECKER(CoverageInfo, FIXED_ARRAY_TYPE)
TYPE_CHECKER(DescriptorArray, DESCRIPTOR_ARRAY_TYPE)
+TYPE_CHECKER(FeedbackCell, FEEDBACK_CELL_TYPE)
TYPE_CHECKER(FeedbackVector, FEEDBACK_VECTOR_TYPE)
TYPE_CHECKER(Foreign, FOREIGN_TYPE)
TYPE_CHECKER(FreeSpace, FREE_SPACE_TYPE)
@@ -100,7 +102,6 @@ TYPE_CHECKER(JSError, JS_ERROR_TYPE)
TYPE_CHECKER(JSFunction, JS_FUNCTION_TYPE)
TYPE_CHECKER(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE)
TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)
-TYPE_CHECKER(JSPromise, JS_PROMISE_TYPE)
TYPE_CHECKER(JSStringIterator, JS_STRING_ITERATOR_TYPE)
TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
TYPE_CHECKER(MutableHeapNumber, MUTABLE_HEAP_NUMBER_TYPE)
@@ -109,11 +110,11 @@ TYPE_CHECKER(PreParsedScopeData, TUPLE2_TYPE)
TYPE_CHECKER(PropertyArray, PROPERTY_ARRAY_TYPE)
TYPE_CHECKER(PropertyCell, PROPERTY_CELL_TYPE)
TYPE_CHECKER(PropertyDescriptorObject, FIXED_ARRAY_TYPE)
+TYPE_CHECKER(ScopeInfo, SCOPE_INFO_TYPE)
TYPE_CHECKER(SmallOrderedHashMap, SMALL_ORDERED_HASH_MAP_TYPE)
TYPE_CHECKER(SmallOrderedHashSet, SMALL_ORDERED_HASH_SET_TYPE)
TYPE_CHECKER(SourcePositionTableWithFrameCache, TUPLE2_TYPE)
-TYPE_CHECKER(TemplateMap, HASH_TABLE_TYPE)
-TYPE_CHECKER(TemplateObjectDescription, TUPLE3_TYPE)
+TYPE_CHECKER(TemplateObjectDescription, TUPLE2_TYPE)
TYPE_CHECKER(TransitionArray, TRANSITION_ARRAY_TYPE)
TYPE_CHECKER(WasmInstanceObject, WASM_INSTANCE_TYPE)
TYPE_CHECKER(WasmMemoryObject, WASM_MEMORY_TYPE)
@@ -322,7 +323,15 @@ bool HeapObject::IsJSWeakCollection() const {
bool HeapObject::IsJSCollection() const { return IsJSMap() || IsJSSet(); }
-bool HeapObject::IsPromiseCapability() const { return IsTuple3(); }
+bool HeapObject::IsMicrotask() const {
+ InstanceType instance_type = map()->instance_type();
+ return (instance_type >= FIRST_MICROTASK_TYPE &&
+ instance_type <= LAST_MICROTASK_TYPE);
+}
+
+bool HeapObject::IsPromiseReactionJobTask() const {
+ return IsPromiseFulfillReactionJobTask() || IsPromiseRejectReactionJobTask();
+}
bool HeapObject::IsEnumCache() const { return IsTuple2(); }
@@ -395,10 +404,6 @@ bool HeapObject::IsScriptContextTable() const {
return map() == GetHeap()->script_context_table_map();
}
-bool HeapObject::IsScopeInfo() const {
- return map() == GetHeap()->scope_info_map();
-}
-
template <>
inline bool Is<JSFunction>(Object* obj) {
return obj->IsJSFunction();
@@ -412,6 +417,22 @@ bool HeapObject::IsStringWrapper() const {
return IsJSValue() && JSValue::cast(this)->value()->IsString();
}
+bool HeapObject::IsBooleanWrapper() const {
+ return IsJSValue() && JSValue::cast(this)->value()->IsBoolean();
+}
+
+bool HeapObject::IsScriptWrapper() const {
+ return IsJSValue() && JSValue::cast(this)->value()->IsScript();
+}
+
+bool HeapObject::IsNumberWrapper() const {
+ return IsJSValue() && JSValue::cast(this)->value()->IsNumber();
+}
+
+bool HeapObject::IsSymbolWrapper() const {
+ return IsJSValue() && JSValue::cast(this)->value()->IsSymbol();
+}
+
bool HeapObject::IsBoolean() const {
return IsOddball() &&
((Oddball::cast(this)->kind() & Oddball::kNotBooleanMask) == 0);
@@ -441,6 +462,10 @@ bool HeapObject::IsNumberDictionary() const {
return map() == GetHeap()->number_dictionary_map();
}
+bool HeapObject::IsSimpleNumberDictionary() const {
+ return map() == GetHeap()->simple_number_dictionary_map();
+}
+
bool HeapObject::IsStringTable() const {
return map() == GetHeap()->string_table_map();
}
@@ -556,6 +581,7 @@ CAST_ACCESSOR(ConstantElementsPair)
CAST_ACCESSOR(ContextExtension)
CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(EnumCache)
+CAST_ACCESSOR(FeedbackCell)
CAST_ACCESSOR(Foreign)
CAST_ACCESSOR(FunctionTemplateInfo)
CAST_ACCESSOR(GlobalDictionary)
@@ -572,7 +598,6 @@ CAST_ACCESSOR(JSGlobalObject)
CAST_ACCESSOR(JSGlobalProxy)
CAST_ACCESSOR(JSMessageObject)
CAST_ACCESSOR(JSObject)
-CAST_ACCESSOR(JSPromise)
CAST_ACCESSOR(JSProxy)
CAST_ACCESSOR(JSReceiver)
CAST_ACCESSOR(JSStringIterator)
@@ -580,6 +605,7 @@ CAST_ACCESSOR(JSValue)
CAST_ACCESSOR(LayoutDescriptor)
CAST_ACCESSOR(NameDictionary)
CAST_ACCESSOR(NormalizedMapCache)
+CAST_ACCESSOR(NumberDictionary)
CAST_ACCESSOR(Object)
CAST_ACCESSOR(ObjectHashSet)
CAST_ACCESSOR(ObjectHashTable)
@@ -587,15 +613,12 @@ CAST_ACCESSOR(ObjectTemplateInfo)
CAST_ACCESSOR(Oddball)
CAST_ACCESSOR(OrderedHashMap)
CAST_ACCESSOR(OrderedHashSet)
-CAST_ACCESSOR(PromiseCapability)
-CAST_ACCESSOR(PromiseReactionJobInfo)
-CAST_ACCESSOR(PromiseResolveThenableJobInfo)
CAST_ACCESSOR(PropertyArray)
CAST_ACCESSOR(PropertyCell)
CAST_ACCESSOR(PrototypeInfo)
CAST_ACCESSOR(RegExpMatchInfo)
CAST_ACCESSOR(ScopeInfo)
-CAST_ACCESSOR(NumberDictionary)
+CAST_ACCESSOR(SimpleNumberDictionary)
CAST_ACCESSOR(SmallOrderedHashMap)
CAST_ACCESSOR(SmallOrderedHashSet)
CAST_ACCESSOR(Smi)
@@ -605,7 +628,6 @@ CAST_ACCESSOR(StringSet)
CAST_ACCESSOR(StringTable)
CAST_ACCESSOR(Struct)
CAST_ACCESSOR(TemplateInfo)
-CAST_ACCESSOR(TemplateMap)
CAST_ACCESSOR(TemplateObjectDescription)
CAST_ACCESSOR(Tuple2)
CAST_ACCESSOR(Tuple3)
@@ -805,10 +827,6 @@ MaybeHandle<Object> Object::ToIndex(Isolate* isolate, Handle<Object> input,
return ConvertToIndex(isolate, input, error_index);
}
-bool Object::HasSpecificClassOf(String* name) {
- return this->IsJSObject() && (JSObject::cast(this)->class_name() == name);
-}
-
MaybeHandle<Object> Object::GetProperty(Handle<Object> object,
Handle<Name> name) {
LookupIterator it(object, name);
@@ -1371,6 +1389,7 @@ Handle<Object> Oddball::ToNumber(Handle<Oddball> input) {
ACCESSORS(Cell, value, Object, kValueOffset)
+ACCESSORS(FeedbackCell, value, HeapObject, kValueOffset)
ACCESSORS(PropertyCell, dependent_code, DependentCode, kDependentCodeOffset)
ACCESSORS(PropertyCell, name, Name, kNameOffset)
ACCESSORS(PropertyCell, value, Object, kValueOffset)
@@ -1716,13 +1735,14 @@ WriteBarrierMode HeapObject::GetWriteBarrierMode(
return UPDATE_WRITE_BARRIER;
}
-AllocationAlignment HeapObject::RequiredAlignment() const {
+AllocationAlignment HeapObject::RequiredAlignment(Map* map) {
#ifdef V8_HOST_ARCH_32_BIT
- if ((IsFixedFloat64Array() || IsFixedDoubleArray()) &&
- FixedArrayBase::cast(this)->length() != 0) {
+ int instance_type = map->instance_type();
+ if (instance_type == FIXED_FLOAT64_ARRAY_TYPE ||
+ instance_type == FIXED_DOUBLE_ARRAY_TYPE) {
return kDoubleAligned;
}
- if (IsHeapNumber()) return kDoubleUnaligned;
+ if (instance_type == HEAP_NUMBER_TYPE) return kDoubleUnaligned;
#endif // V8_HOST_ARCH_32_BIT
return kWordAligned;
}
@@ -2246,7 +2266,7 @@ ACCESSORS(JSBoundFunction, bound_this, Object, kBoundThisOffset)
ACCESSORS(JSBoundFunction, bound_arguments, FixedArray, kBoundArgumentsOffset)
ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
-ACCESSORS(JSFunction, feedback_vector_cell, Cell, kFeedbackVectorOffset)
+ACCESSORS(JSFunction, feedback_cell, FeedbackCell, kFeedbackCellOffset)
ACCESSORS(JSGlobalObject, native_context, Context, kNativeContextOffset)
ACCESSORS(JSGlobalObject, global_proxy, JSObject, kGlobalProxyOffset)
@@ -2272,22 +2292,6 @@ bool AccessorInfo::has_getter() {
return result;
}
-ACCESSORS(PromiseResolveThenableJobInfo, thenable, JSReceiver, kThenableOffset)
-ACCESSORS(PromiseResolveThenableJobInfo, then, JSReceiver, kThenOffset)
-ACCESSORS(PromiseResolveThenableJobInfo, resolve, JSFunction, kResolveOffset)
-ACCESSORS(PromiseResolveThenableJobInfo, reject, JSFunction, kRejectOffset)
-ACCESSORS(PromiseResolveThenableJobInfo, context, Context, kContextOffset);
-
-ACCESSORS(PromiseReactionJobInfo, value, Object, kValueOffset);
-ACCESSORS(PromiseReactionJobInfo, tasks, Object, kTasksOffset);
-ACCESSORS(PromiseReactionJobInfo, deferred_promise, Object,
- kDeferredPromiseOffset);
-ACCESSORS(PromiseReactionJobInfo, deferred_on_resolve, Object,
- kDeferredOnResolveOffset);
-ACCESSORS(PromiseReactionJobInfo, deferred_on_reject, Object,
- kDeferredOnRejectOffset);
-ACCESSORS(PromiseReactionJobInfo, context, Context, kContextOffset);
-
ACCESSORS(AsyncGeneratorRequest, next, Object, kNextOffset)
SMI_ACCESSORS(AsyncGeneratorRequest, resume_mode, kResumeModeOffset)
ACCESSORS(AsyncGeneratorRequest, value, Object, kValueOffset)
@@ -2356,7 +2360,6 @@ bool ConstantElementsPair::is_empty() const {
return constant_values()->length() == 0;
}
-SMI_ACCESSORS(TemplateObjectDescription, hash, kHashOffset)
ACCESSORS(TemplateObjectDescription, raw_strings, FixedArray, kRawStringsOffset)
ACCESSORS(TemplateObjectDescription, cooked_strings, FixedArray,
kCookedStringsOffset)
@@ -2494,7 +2497,7 @@ SMI_ACCESSORS(StackFrameInfo, id, kIdIndex)
ACCESSORS(SourcePositionTableWithFrameCache, source_position_table, ByteArray,
kSourcePositionTableIndex)
ACCESSORS(SourcePositionTableWithFrameCache, stack_frame_cache,
- NumberDictionary, kStackFrameCacheIndex)
+ SimpleNumberDictionary, kStackFrameCacheIndex)
SMI_ACCESSORS(FunctionTemplateInfo, length, kLengthOffset)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, hidden_prototype,
@@ -2512,8 +2515,8 @@ BOOL_ACCESSORS(FunctionTemplateInfo, flag, accept_any_receiver,
kAcceptAnyReceiver)
FeedbackVector* JSFunction::feedback_vector() const {
- DCHECK(feedback_vector_cell()->value()->IsFeedbackVector());
- return FeedbackVector::cast(feedback_vector_cell()->value());
+ DCHECK(has_feedback_vector());
+ return FeedbackVector::cast(feedback_cell()->value());
}
// Code objects that are marked for deoptimization are not considered to be
@@ -2620,21 +2623,7 @@ void JSFunction::SetOptimizationMarker(OptimizationMarker marker) {
}
bool JSFunction::has_feedback_vector() const {
- return !feedback_vector_cell()->value()->IsUndefined(GetIsolate());
-}
-
-JSFunction::FeedbackVectorState JSFunction::GetFeedbackVectorState(
- Isolate* isolate) const {
- Cell* cell = feedback_vector_cell();
- if (shared()->HasAsmWasmData()) {
- return NO_VECTOR_NEEDED;
- } else if (cell == isolate->heap()->undefined_cell()) {
- return TOP_LEVEL_SCRIPT_NEEDS_VECTOR;
- } else if (cell->value() == isolate->heap()->undefined_value() ||
- !has_feedback_vector()) {
- return NEEDS_VECTOR;
- }
- return HAS_VECTOR;
+ return !feedback_cell()->value()->IsUndefined(GetIsolate());
}
Context* JSFunction::context() {
@@ -2769,8 +2758,7 @@ bool JSGeneratorObject::is_executing() const {
}
ACCESSORS(JSAsyncGeneratorObject, queue, HeapObject, kQueueOffset)
-ACCESSORS(JSAsyncGeneratorObject, awaited_promise, HeapObject,
- kAwaitedPromiseOffset)
+SMI_ACCESSORS(JSAsyncGeneratorObject, is_awaiting, kIsAwaitingOffset)
ACCESSORS(JSValue, value, Object, kValueOffset)
@@ -2806,23 +2794,6 @@ SMI_ACCESSORS(JSMessageObject, start_position, kStartPositionOffset)
SMI_ACCESSORS(JSMessageObject, end_position, kEndPositionOffset)
SMI_ACCESSORS(JSMessageObject, error_level, kErrorLevelOffset)
-
-
-ACCESSORS(PromiseCapability, promise, Object, kPromiseOffset)
-ACCESSORS(PromiseCapability, resolve, Object, kResolveOffset)
-ACCESSORS(PromiseCapability, reject, Object, kRejectOffset)
-
-ACCESSORS(JSPromise, result, Object, kResultOffset)
-ACCESSORS(JSPromise, deferred_promise, Object, kDeferredPromiseOffset)
-ACCESSORS(JSPromise, deferred_on_resolve, Object, kDeferredOnResolveOffset)
-ACCESSORS(JSPromise, deferred_on_reject, Object, kDeferredOnRejectOffset)
-ACCESSORS(JSPromise, fulfill_reactions, Object, kFulfillReactionsOffset)
-ACCESSORS(JSPromise, reject_reactions, Object, kRejectReactionsOffset)
-SMI_ACCESSORS(JSPromise, flags, kFlagsOffset)
-BOOL_ACCESSORS(JSPromise, flags, has_handler, kHasHandlerBit)
-BOOL_ACCESSORS(JSPromise, flags, handled_hint, kHandledHintBit)
-
-
ElementsKind JSObject::GetElementsKind() {
ElementsKind kind = map()->elements_kind();
#if VERIFY_HEAP && DEBUG
@@ -3304,30 +3275,35 @@ void GlobalDictionary::ValueAtPut(int entry, Object* value) {
set(EntryToIndex(entry), value);
}
-bool NumberDictionaryShape::IsMatch(uint32_t key, Object* other) {
+bool NumberDictionaryBaseShape::IsMatch(uint32_t key, Object* other) {
DCHECK(other->IsNumber());
return key == static_cast<uint32_t>(other->Number());
}
-uint32_t NumberDictionaryShape::Hash(Isolate* isolate, uint32_t key) {
+uint32_t NumberDictionaryBaseShape::Hash(Isolate* isolate, uint32_t key) {
return ComputeIntegerHash(key, isolate->heap()->HashSeed());
}
-uint32_t NumberDictionaryShape::HashForObject(Isolate* isolate, Object* other) {
+uint32_t NumberDictionaryBaseShape::HashForObject(Isolate* isolate,
+ Object* other) {
DCHECK(other->IsNumber());
return ComputeIntegerHash(static_cast<uint32_t>(other->Number()),
isolate->heap()->HashSeed());
}
+Handle<Object> NumberDictionaryBaseShape::AsHandle(Isolate* isolate,
+ uint32_t key) {
+ return isolate->factory()->NewNumberFromUint(key);
+}
+
int NumberDictionaryShape::GetMapRootIndex() {
return Heap::kNumberDictionaryMapRootIndex;
}
-Handle<Object> NumberDictionaryShape::AsHandle(Isolate* isolate, uint32_t key) {
- return isolate->factory()->NewNumberFromUint(key);
+int SimpleNumberDictionaryShape::GetMapRootIndex() {
+ return Heap::kSimpleNumberDictionaryMapRootIndex;
}
-
bool NameDictionaryShape::IsMatch(Handle<Name> key, Object* other) {
DCHECK(other->IsTheHole(key->GetIsolate()) ||
Name::cast(other)->IsUniqueName());
@@ -3488,6 +3464,7 @@ ACCESSORS(JSIteratorResult, done, Object, kDoneOffset)
ACCESSORS(JSAsyncFromSyncIterator, sync_iterator, JSReceiver,
kSyncIteratorOffset)
+ACCESSORS(JSAsyncFromSyncIterator, next, Object, kNextOffset)
ACCESSORS(JSStringIterator, string, String, kStringOffset)
SMI_ACCESSORS(JSStringIterator, index, kNextIndexOffset)
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index f13c222632..68f147f7d4 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -13,6 +13,8 @@
#include "src/interpreter/bytecodes.h"
#include "src/objects-inl.h"
#include "src/objects/debug-objects-inl.h"
+#include "src/objects/microtask-inl.h"
+#include "src/objects/promise-inl.h"
#include "src/ostreams.h"
#include "src/regexp/jsregexp.h"
#include "src/transitions-inl.h"
@@ -41,7 +43,6 @@ void Object::Print(std::ostream& os) { // NOLINT
}
}
-
void HeapObject::PrintHeader(std::ostream& os, const char* id) { // NOLINT
os << reinterpret_cast<void*>(this) << ": [";
if (id != nullptr) {
@@ -51,6 +52,7 @@ void HeapObject::PrintHeader(std::ostream& os, const char* id) { // NOLINT
}
os << "]";
if (GetHeap()->InOldSpace(this)) os << " in OldSpace";
+ if (!IsMap()) os << "\n - map: " << Brief(map());
}
@@ -106,6 +108,9 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case TRANSITION_ARRAY_TYPE:
TransitionArray::cast(this)->TransitionArrayPrint(os);
break;
+ case FEEDBACK_CELL_TYPE:
+ FeedbackCell::cast(this)->FeedbackCellPrint(os);
+ break;
case FEEDBACK_VECTOR_TYPE:
FeedbackVector::cast(this)->FeedbackVectorPrint(os);
break;
@@ -244,10 +249,12 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case LOAD_HANDLER_TYPE:
LoadHandler::cast(this)->LoadHandlerPrint(os);
break;
-
case STORE_HANDLER_TYPE:
StoreHandler::cast(this)->StoreHandlerPrint(os);
break;
+ case SCOPE_INFO_TYPE:
+ ScopeInfo::cast(this)->ScopeInfoPrint(os);
+ break;
default:
os << "UNKNOWN TYPE " << map()->instance_type();
@@ -412,14 +419,14 @@ void PrintSloppyArgumentElements(std::ostream& os, ElementsKind kind,
SloppyArgumentsElements* elements) {
Isolate* isolate = elements->GetIsolate();
FixedArray* arguments_store = elements->arguments();
- os << "\n 0: context = " << Brief(elements->context())
- << "\n 1: arguments_store = " << Brief(arguments_store)
+ os << "\n 0: context: " << Brief(elements->context())
+ << "\n 1: arguments_store: " << Brief(arguments_store)
<< "\n parameter to context slot map:";
for (uint32_t i = 0; i < elements->parameter_map_length(); i++) {
uint32_t raw_index = i + SloppyArgumentsElements::kParameterMapStart;
Object* mapped_entry = elements->get_mapped_entry(i);
os << "\n " << raw_index << ": param(" << i
- << ") = " << Brief(mapped_entry);
+ << "): " << Brief(mapped_entry);
if (mapped_entry->IsTheHole(isolate)) {
os << " in the arguments_store[" << i << "]";
} else {
@@ -428,7 +435,7 @@ void PrintSloppyArgumentElements(std::ostream& os, ElementsKind kind,
}
if (arguments_store->length() == 0) return;
os << "\n }"
- << "\n - arguments_store = " << Brief(arguments_store) << " "
+ << "\n - arguments_store: " << Brief(arguments_store) << " "
<< ElementsKindToString(arguments_store->map()->elements_kind()) << " {";
if (kind == FAST_SLOPPY_ARGUMENTS_ELEMENTS) {
PrintFixedArrayElements(os, arguments_store);
@@ -443,7 +450,7 @@ void PrintSloppyArgumentElements(std::ostream& os, ElementsKind kind,
void JSObject::PrintElements(std::ostream& os) { // NOLINT
// Don't call GetElementsKind, its validation code can cause the printer to
// fail when debugging.
- os << " - elements = " << Brief(elements()) << " {";
+ os << " - elements: " << Brief(elements()) << " {";
if (elements()->length() == 0) {
os << " }\n";
return;
@@ -492,21 +499,21 @@ static void JSObjectPrintHeader(std::ostream& os, JSObject* obj,
obj->PrintHeader(os, id);
// Don't call GetElementsKind, its validation code can cause the printer to
// fail when debugging.
- os << "\n - map = " << reinterpret_cast<void*>(obj->map()) << " [";
+ os << " [";
if (obj->HasFastProperties()) {
os << "FastProperties";
} else {
os << "DictionaryProperties";
}
PrototypeIterator iter(obj->GetIsolate(), obj);
- os << "]\n - prototype = " << reinterpret_cast<void*>(iter.GetCurrent());
- os << "\n - elements = " << Brief(obj->elements()) << " ["
+ os << "]\n - prototype: " << Brief(iter.GetCurrent());
+ os << "\n - elements: " << Brief(obj->elements()) << " ["
<< ElementsKindToString(obj->map()->elements_kind());
if (obj->elements()->IsCowArray()) os << " (COW)";
os << "]";
Object* hash = obj->GetHash();
if (hash->IsSmi()) {
- os << "\n - hash = " << Brief(hash);
+ os << "\n - hash: " << Brief(hash);
}
if (obj->GetEmbedderFieldCount() > 0) {
os << "\n - embedder fields: " << obj->GetEmbedderFieldCount();
@@ -516,7 +523,7 @@ static void JSObjectPrintHeader(std::ostream& os, JSObject* obj,
static void JSObjectPrintBody(std::ostream& os, JSObject* obj, // NOLINT
bool print_elements = true) {
- os << "\n - properties = ";
+ os << "\n - properties: ";
Object* properties_or_hash = obj->raw_properties_or_hash();
if (!properties_or_hash->IsSmi()) {
os << Brief(properties_or_hash);
@@ -545,27 +552,26 @@ void JSObject::JSObjectPrint(std::ostream& os) { // NOLINT
void JSArray::JSArrayPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSArray");
- os << "\n - length = " << Brief(this->length());
+ os << "\n - length: " << Brief(this->length());
JSObjectPrintBody(os, this);
}
void JSPromise::JSPromisePrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSPromise");
- os << "\n - status = " << JSPromise::Status(status());
- os << "\n - result = " << Brief(result());
- os << "\n - deferred_promise: " << Brief(deferred_promise());
- os << "\n - deferred_on_resolve: " << Brief(deferred_on_resolve());
- os << "\n - deferred_on_reject: " << Brief(deferred_on_reject());
- os << "\n - fulfill_reactions = " << Brief(fulfill_reactions());
- os << "\n - reject_reactions = " << Brief(reject_reactions());
- os << "\n - has_handler = " << has_handler();
+ os << "\n - status: " << JSPromise::Status(status());
+ if (status() == Promise::kPending) {
+ os << "\n - reactions: " << Brief(reactions());
+ } else {
+ os << "\n - result: " << Brief(result());
+ }
+ os << "\n - has_handler: " << has_handler();
os << "\n ";
}
void JSRegExp::JSRegExpPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSRegExp");
- os << "\n - data = " << Brief(data());
- os << "\n - source = " << Brief(source());
+ os << "\n - data: " << Brief(data());
+ os << "\n - source: " << Brief(source());
JSObjectPrintBody(os, this);
}
@@ -654,7 +660,6 @@ void AliasedArgumentsEntry::AliasedArgumentsEntryPrint(
void FixedArray::FixedArrayPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, IsHashTable() ? "HashTable" : "FixedArray");
- os << "\n - map = " << Brief(map());
os << "\n - length: " << length();
PrintFixedArrayElements(os, this);
os << "\n";
@@ -662,7 +667,6 @@ void FixedArray::FixedArrayPrint(std::ostream& os) { // NOLINT
void PropertyArray::PropertyArrayPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "PropertyArray");
- os << "\n - map = " << Brief(map());
os << "\n - length: " << length();
os << "\n - hash: " << Hash();
PrintFixedArrayElements(os, this);
@@ -671,7 +675,6 @@ void PropertyArray::PropertyArrayPrint(std::ostream& os) { // NOLINT
void FixedDoubleArray::FixedDoubleArrayPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "FixedDoubleArray");
- os << "\n - map = " << Brief(map());
os << "\n - length: " << length();
DoPrintElements<FixedDoubleArray>(os, this);
os << "\n";
@@ -689,20 +692,31 @@ void TransitionArray::TransitionArrayPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-template void FeedbackVectorSpecBase<StaticFeedbackVectorSpec>::Print();
-template void FeedbackVectorSpecBase<FeedbackVectorSpec>::Print();
+void FeedbackCell::FeedbackCellPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "FeedbackCell");
+ if (map() == GetHeap()->no_closures_cell_map()) {
+ os << "\n - no closures";
+ } else if (map() == GetHeap()->one_closure_cell_map()) {
+ os << "\n - one closure";
+ } else if (map() == GetHeap()->many_closures_cell_map()) {
+ os << "\n - many closures";
+ } else {
+ os << "\n - Invalid FeedbackCell map";
+ }
+ os << " - value: " << Brief(value());
+ os << "\n";
+}
-template <typename Derived>
-void FeedbackVectorSpecBase<Derived>::Print() {
+void FeedbackVectorSpec::Print() {
OFStream os(stdout);
+
FeedbackVectorSpecPrint(os);
+
os << std::flush;
}
-template <typename Derived>
-void FeedbackVectorSpecBase<Derived>::FeedbackVectorSpecPrint(
- std::ostream& os) { // NOLINT
- int slot_count = This()->slots();
+void FeedbackVectorSpec::FeedbackVectorSpecPrint(std::ostream& os) { // NOLINT
+ int slot_count = slots();
os << " - slot_count: " << slot_count;
if (slot_count == 0) {
os << " (empty)\n";
@@ -710,7 +724,7 @@ void FeedbackVectorSpecBase<Derived>::FeedbackVectorSpecPrint(
}
for (int slot = 0; slot < slot_count;) {
- FeedbackSlotKind kind = This()->GetKind(FeedbackSlot(slot));
+ FeedbackSlotKind kind = GetKind(FeedbackSlot(slot));
int entry_size = FeedbackMetadata::GetSlotSize(kind);
DCHECK_LT(0, entry_size);
os << "\n Slot #" << slot << " " << kind;
@@ -773,7 +787,7 @@ void FeedbackVector::FeedbackVectorPrint(std::ostream& os) { // NOLINT
FeedbackSlotKind kind = iter.kind();
os << "\n - slot " << slot << " " << kind << " ";
- FeedbackSlotPrint(os, slot, kind);
+ FeedbackSlotPrint(os, slot);
int entry_size = iter.entry_size();
if (entry_size > 0) os << " {";
@@ -788,71 +802,62 @@ void FeedbackVector::FeedbackVectorPrint(std::ostream& os) { // NOLINT
void FeedbackVector::FeedbackSlotPrint(std::ostream& os,
FeedbackSlot slot) { // NOLINT
- FeedbackSlotPrint(os, slot, GetKind(slot));
+ FeedbackNexus nexus(this, slot);
+ nexus.Print(os);
}
-void FeedbackVector::FeedbackSlotPrint(std::ostream& os, FeedbackSlot slot,
- FeedbackSlotKind kind) { // NOLINT
- switch (kind) {
- case FeedbackSlotKind::kLoadProperty: {
- LoadICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
+namespace {
+
+const char* ICState2String(InlineCacheState state) {
+ switch (state) {
+ case UNINITIALIZED:
+ return "UNINITIALIZED";
+ case PREMONOMORPHIC:
+ return "PREMONOMORPHIC";
+ case MONOMORPHIC:
+ return "MONOMORPHIC";
+ case RECOMPUTE_HANDLER:
+ return "RECOMPUTE_HANDLER";
+ case POLYMORPHIC:
+ return "POLYMORPHIC";
+ case MEGAMORPHIC:
+ return "MEGAMORPHIC";
+ case GENERIC:
+ return "GENERIC";
+ }
+ UNREACHABLE();
+}
+} // anonymous namespace
+
+void FeedbackNexus::Print(std::ostream& os) { // NOLINT
+ switch (kind()) {
+ case FeedbackSlotKind::kCall:
+ case FeedbackSlotKind::kLoadProperty:
+ case FeedbackSlotKind::kLoadKeyed:
case FeedbackSlotKind::kLoadGlobalInsideTypeof:
- case FeedbackSlotKind::kLoadGlobalNotInsideTypeof: {
- LoadGlobalICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
- case FeedbackSlotKind::kLoadKeyed: {
- KeyedLoadICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
- case FeedbackSlotKind::kCall: {
- CallICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
+ case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
case FeedbackSlotKind::kStoreNamedSloppy:
case FeedbackSlotKind::kStoreNamedStrict:
case FeedbackSlotKind::kStoreOwnNamed:
case FeedbackSlotKind::kStoreGlobalSloppy:
- case FeedbackSlotKind::kStoreGlobalStrict: {
- StoreICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
+ case FeedbackSlotKind::kStoreGlobalStrict:
case FeedbackSlotKind::kStoreKeyedSloppy:
+ case FeedbackSlotKind::kInstanceOf:
+ case FeedbackSlotKind::kStoreDataPropertyInLiteral:
case FeedbackSlotKind::kStoreKeyedStrict: {
- KeyedStoreICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
+ os << ICState2String(StateFromFeedback());
break;
}
case FeedbackSlotKind::kBinaryOp: {
- BinaryOpICNexus nexus(this, slot);
- os << "BinaryOp:" << nexus.GetBinaryOperationFeedback();
+ os << "BinaryOp:" << GetBinaryOperationFeedback();
break;
}
case FeedbackSlotKind::kCompareOp: {
- CompareICNexus nexus(this, slot);
- os << "CompareOp:" << nexus.GetCompareOperationFeedback();
+ os << "CompareOp:" << GetCompareOperationFeedback();
break;
}
case FeedbackSlotKind::kForIn: {
- ForInICNexus nexus(this, slot);
- os << "ForIn:" << nexus.GetForInFeedback();
- break;
- }
- case FeedbackSlotKind::kInstanceOf: {
- InstanceOfICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
- case FeedbackSlotKind::kStoreDataPropertyInLiteral: {
- StoreDataPropertyInLiteralICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
+ os << "ForIn:" << GetForInFeedback();
break;
}
case FeedbackSlotKind::kCreateClosure:
@@ -868,7 +873,7 @@ void FeedbackVector::FeedbackSlotPrint(std::ostream& os, FeedbackSlot slot,
void JSValue::JSValuePrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSValue");
- os << "\n - value = " << Brief(value());
+ os << "\n - value: " << Brief(value());
JSObjectPrintBody(os, this);
}
@@ -933,7 +938,7 @@ static const char* const weekdays[] = {
void JSDate::JSDatePrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSDate");
- os << "\n - value = " << Brief(value());
+ os << "\n - value: " << Brief(value());
if (!year()->IsSmi()) {
os << "\n - time = NaN\n";
} else {
@@ -955,10 +960,9 @@ void JSDate::JSDatePrint(std::ostream& os) { // NOLINT
void JSProxy::JSProxyPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "JSProxy");
- os << "\n - map = " << reinterpret_cast<void*>(map());
- os << "\n - target = ";
+ os << "\n - target: ";
target()->ShortPrint(os);
- os << "\n - handler = ";
+ os << "\n - handler: ";
handler()->ShortPrint(os);
os << "\n";
}
@@ -966,21 +970,21 @@ void JSProxy::JSProxyPrint(std::ostream& os) { // NOLINT
void JSSet::JSSetPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSSet");
- os << " - table = " << Brief(table());
+ os << " - table: " << Brief(table());
JSObjectPrintBody(os, this);
}
void JSMap::JSMapPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSMap");
- os << " - table = " << Brief(table());
+ os << " - table: " << Brief(table());
JSObjectPrintBody(os, this);
}
void JSCollectionIterator::JSCollectionIteratorPrint(
std::ostream& os) { // NOLINT
- os << "\n - table = " << Brief(table());
- os << "\n - index = " << Brief(index());
+ os << "\n - table: " << Brief(table());
+ os << "\n - index: " << Brief(index());
os << "\n";
}
@@ -999,22 +1003,22 @@ void JSMapIterator::JSMapIteratorPrint(std::ostream& os) { // NOLINT
void JSWeakMap::JSWeakMapPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSWeakMap");
- os << "\n - table = " << Brief(table());
+ os << "\n - table: " << Brief(table());
JSObjectPrintBody(os, this);
}
void JSWeakSet::JSWeakSetPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSWeakSet");
- os << "\n - table = " << Brief(table());
+ os << "\n - table: " << Brief(table());
JSObjectPrintBody(os, this);
}
void JSArrayBuffer::JSArrayBufferPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSArrayBuffer");
- os << "\n - backing_store = " << backing_store();
- os << "\n - byte_length = " << Brief(byte_length());
+ os << "\n - backing_store: " << backing_store();
+ os << "\n - byte_length: " << Brief(byte_length());
if (is_external()) os << "\n - external";
if (is_neuterable()) os << "\n - neuterable";
if (was_neutered()) os << "\n - neutered";
@@ -1027,10 +1031,10 @@ void JSArrayBuffer::JSArrayBufferPrint(std::ostream& os) { // NOLINT
void JSTypedArray::JSTypedArrayPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSTypedArray");
- os << "\n - buffer = " << Brief(buffer());
- os << "\n - byte_offset = " << Brief(byte_offset());
- os << "\n - byte_length = " << Brief(byte_length());
- os << "\n - length = " << Brief(length());
+ os << "\n - buffer: " << Brief(buffer());
+ os << "\n - byte_offset: " << Brief(byte_offset());
+ os << "\n - byte_length: " << Brief(byte_length());
+ os << "\n - length: " << Brief(length());
if (WasNeutered()) os << "\n - neutered";
JSObjectPrintBody(os, this, !WasNeutered());
}
@@ -1039,18 +1043,17 @@ void JSArrayIterator::JSArrayIteratorPrint(std::ostream& os) { // NOLING
JSObjectPrintHeader(os, this, "JSArrayIterator");
InstanceType instance_type = map()->instance_type();
- std::string type;
+ os << "\n - type: ";
if (instance_type <= LAST_ARRAY_KEY_ITERATOR_TYPE) {
- type = "keys";
+ os << "keys";
} else if (instance_type <= LAST_ARRAY_KEY_VALUE_ITERATOR_TYPE) {
- type = "entries";
+ os << "entries";
} else {
- type = "values";
+ os << "values";
}
- os << "\n - type = " << type;
- os << "\n - object = " << Brief(object());
- os << "\n - index = " << Brief(index());
+ os << "\n - object: " << Brief(object());
+ os << "\n - index: " << Brief(index());
JSObjectPrintBody(os, this);
}
@@ -1058,8 +1061,8 @@ void JSArrayIterator::JSArrayIteratorPrint(std::ostream& os) { // NOLING
void JSDataView::JSDataViewPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSDataView");
os << "\n - buffer =" << Brief(buffer());
- os << "\n - byte_offset = " << Brief(byte_offset());
- os << "\n - byte_length = " << Brief(byte_length());
+ os << "\n - byte_offset: " << Brief(byte_offset());
+ os << "\n - byte_length: " << Brief(byte_length());
if (WasNeutered()) os << "\n - neutered";
JSObjectPrintBody(os, this, !WasNeutered());
}
@@ -1067,45 +1070,15 @@ void JSDataView::JSDataViewPrint(std::ostream& os) { // NOLINT
void JSBoundFunction::JSBoundFunctionPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSBoundFunction");
- os << "\n - bound_target_function = " << Brief(bound_target_function());
- os << "\n - bound_this = " << Brief(bound_this());
- os << "\n - bound_arguments = " << Brief(bound_arguments());
+ os << "\n - bound_target_function: " << Brief(bound_target_function());
+ os << "\n - bound_this: " << Brief(bound_this());
+ os << "\n - bound_arguments: " << Brief(bound_arguments());
JSObjectPrintBody(os, this);
}
-
-namespace {
-
-std::ostream& operator<<(std::ostream& os, FunctionKind kind) {
- os << "[";
- if (kind == FunctionKind::kNormalFunction) {
- os << " NormalFunction";
- } else {
-#define PRINT_FLAG(name) \
- if (static_cast<int>(kind) & static_cast<int>(FunctionKind::k##name)) { \
- os << " " << #name; \
- }
-
- PRINT_FLAG(ArrowFunction)
- PRINT_FLAG(GeneratorFunction)
- PRINT_FLAG(ConciseMethod)
- PRINT_FLAG(DefaultConstructor)
- PRINT_FLAG(DerivedConstructor)
- PRINT_FLAG(BaseConstructor)
- PRINT_FLAG(GetterFunction)
- PRINT_FLAG(SetterFunction)
- PRINT_FLAG(AsyncFunction)
- PRINT_FLAG(Module)
-#undef PRINT_FLAG
- }
- return os << " ]";
-}
-
-} // namespace
-
void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "Function");
- os << "\n - function prototype = ";
+ os << "\n - function prototype: ";
if (has_prototype_slot()) {
if (has_prototype()) {
os << Brief(prototype());
@@ -1113,22 +1086,37 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
os << " (non-instance prototype)";
}
}
- os << "\n - initial_map = ";
+ os << "\n - initial_map: ";
if (has_initial_map()) os << Brief(initial_map());
} else {
os << "<no-prototype-slot>";
}
- os << "\n - shared_info = " << Brief(shared());
- os << "\n - name = " << Brief(shared()->name());
- os << "\n - formal_parameter_count = "
+ os << "\n - shared_info: " << Brief(shared());
+ os << "\n - name: " << Brief(shared()->name());
+
+ // Print Builtin name for builtin functions
+ int builtin_index = code()->builtin_index();
+ if (builtin_index != -1 && !IsInterpreted()) {
+ if (builtin_index == Builtins::kDeserializeLazy) {
+ if (shared()->HasLazyDeserializationBuiltinId()) {
+ builtin_index = shared()->lazy_deserialization_builtin_id();
+ os << "\n - builtin: " << GetIsolate()->builtins()->name(builtin_index)
+ << "(lazy)";
+ }
+ } else {
+ os << "\n - builtin: " << GetIsolate()->builtins()->name(builtin_index);
+ }
+ }
+
+ os << "\n - formal_parameter_count: "
<< shared()->internal_formal_parameter_count();
- os << "\n - kind = " << shared()->kind();
- os << "\n - context = " << Brief(context());
- os << "\n - code = " << Brief(code());
+ os << "\n - kind: " << shared()->kind();
+ os << "\n - context: " << Brief(context());
+ os << "\n - code: " << Brief(code());
if (IsInterpreted()) {
os << "\n - interpreted";
if (shared()->HasBytecodeArray()) {
- os << "\n - bytecode = " << shared()->bytecode_array();
+ os << "\n - bytecode: " << shared()->bytecode_array();
}
}
if (WasmExportedFunction::IsWasmExportedFunction(this)) {
@@ -1142,7 +1130,7 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
shared()->PrintSourceCode(os);
JSObjectPrintBody(os, this);
os << "\n - feedback vector: ";
- if (feedback_vector_cell()->value()->IsFeedbackVector()) {
+ if (has_feedback_vector()) {
feedback_vector()->FeedbackVectorPrint(os);
} else {
os << "not available\n";
@@ -1151,7 +1139,7 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
void SharedFunctionInfo::PrintSourceCode(std::ostream& os) {
if (HasSourceCode()) {
- os << "\n - source code = ";
+ os << "\n - source code: ";
String* source = String::cast(Script::cast(script())->source());
int start = start_position();
int length = end_position() - start;
@@ -1163,28 +1151,26 @@ void SharedFunctionInfo::PrintSourceCode(std::ostream& os) {
void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "SharedFunctionInfo");
- os << "\n - name = ";
+ os << "\n - name: ";
if (has_shared_name()) {
os << Brief(raw_name());
} else {
os << "<no-shared-name>";
}
- os << "\n - kind = " << kind();
+ os << "\n - kind: " << kind();
if (needs_home_object()) {
os << "\n - needs_home_object";
}
- os << "\n - function_map_index = " << function_map_index();
- os << "\n - formal_parameter_count = " << internal_formal_parameter_count();
- os << "\n - expected_nof_properties = " << expected_nof_properties();
- os << "\n - language_mode = " << language_mode();
- os << "\n - instance class name = ";
- instance_class_name()->Print(os);
- os << " - code = " << Brief(code());
+ os << "\n - function_map_index: " << function_map_index();
+ os << "\n - formal_parameter_count: " << internal_formal_parameter_count();
+ os << "\n - expected_nof_properties: " << expected_nof_properties();
+ os << "\n - language_mode: " << language_mode();
+ os << " - code: " << Brief(code());
if (HasBytecodeArray()) {
- os << "\n - bytecode_array = " << bytecode_array();
+ os << "\n - bytecode_array: " << bytecode_array();
}
if (HasAsmWasmData()) {
- os << "\n - asm_wasm_data = " << Brief(asm_wasm_data());
+ os << "\n - asm_wasm_data: " << Brief(asm_wasm_data());
}
PrintSourceCode(os);
// Script files are often large, hard to read.
@@ -1197,19 +1183,20 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
} else if (is_declaration()) {
os << "\n - declaration";
}
- os << "\n - function token position = " << function_token_position();
- os << "\n - start position = " << start_position();
- os << "\n - end position = " << end_position();
+ os << "\n - function token position: " << function_token_position();
+ os << "\n - start position: " << start_position();
+ os << "\n - end position: " << end_position();
if (HasDebugInfo()) {
- os << "\n - debug info = " << Brief(debug_info());
+ os << "\n - debug info: " << Brief(debug_info());
} else {
os << "\n - no debug info";
}
- os << "\n - length = " << length();
- os << "\n - feedback_metadata = ";
+ os << "\n - scope info: " << Brief(scope_info());
+ os << "\n - length: " << length();
+ os << "\n - feedback_metadata: ";
feedback_metadata()->FeedbackMetadataPrint(os);
if (HasPreParsedScopeData()) {
- os << "\n - preparsed scope data = " << preparsed_scope_data();
+ os << "\n - preparsed scope data: " << preparsed_scope_data();
} else {
os << "\n - no preparsed scope data";
}
@@ -1220,7 +1207,7 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
void JSGlobalProxy::JSGlobalProxyPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSGlobalProxy");
if (!GetIsolate()->bootstrapper()->IsActive()) {
- os << "\n - native context = " << Brief(native_context());
+ os << "\n - native context: " << Brief(native_context());
}
JSObjectPrintBody(os, this);
}
@@ -1229,9 +1216,9 @@ void JSGlobalProxy::JSGlobalProxyPrint(std::ostream& os) { // NOLINT
void JSGlobalObject::JSGlobalObjectPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSGlobalObject");
if (!GetIsolate()->bootstrapper()->IsActive()) {
- os << "\n - native context = " << Brief(native_context());
+ os << "\n - native context: " << Brief(native_context());
}
- os << "\n - global proxy = " << Brief(global_proxy());
+ os << "\n - global proxy: " << Brief(global_proxy());
JSObjectPrintBody(os, this);
}
@@ -1338,34 +1325,64 @@ void AccessorInfo::AccessorInfoPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void PromiseCapability::PromiseCapabilityPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "PromiseCapability");
- os << "\n - promise: " << Brief(promise());
- os << "\n - resolve: " << Brief(resolve());
- os << "\n - reject: " << Brief(reject());
+void CallbackTask::CallbackTaskPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "CallbackTask");
+ os << "\n - callback: " << Brief(callback());
+ os << "\n - data: " << Brief(data());
+ os << "\n";
+}
+
+void CallableTask::CallableTaskPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "CallableTask");
+ os << "\n - context: " << Brief(context());
+ os << "\n - callable: " << Brief(callable());
os << "\n";
}
-void PromiseResolveThenableJobInfo::PromiseResolveThenableJobInfoPrint(
+void PromiseFulfillReactionJobTask::PromiseFulfillReactionJobTaskPrint(
std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "PromiseResolveThenableJobInfo");
- os << "\n - thenable: " << Brief(thenable());
+ HeapObject::PrintHeader(os, "PromiseFulfillReactionJobTask");
+ os << "\n - argument: " << Brief(argument());
+ os << "\n - context: " << Brief(context());
+ os << "\n - handler: " << Brief(handler());
+ os << "\n - payload: " << Brief(payload());
+ os << "\n";
+}
+
+void PromiseRejectReactionJobTask::PromiseRejectReactionJobTaskPrint(
+ std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "PromiseRejectReactionJobTask");
+ os << "\n - argument: " << Brief(argument());
+ os << "\n - context: " << Brief(context());
+ os << "\n - handler: " << Brief(handler());
+ os << "\n - payload: " << Brief(payload());
+ os << "\n";
+}
+
+void PromiseResolveThenableJobTask::PromiseResolveThenableJobTaskPrint(
+ std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "PromiseResolveThenableJobTask");
+ os << "\n - context: " << Brief(context());
+ os << "\n - promise_to_resolve: " << Brief(promise_to_resolve());
os << "\n - then: " << Brief(then());
+ os << "\n - thenable: " << Brief(thenable());
+ os << "\n";
+}
+
+void PromiseCapability::PromiseCapabilityPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "PromiseCapability");
+ os << "\n - promise: " << Brief(promise());
os << "\n - resolve: " << Brief(resolve());
os << "\n - reject: " << Brief(reject());
- os << "\n - context: " << Brief(context());
os << "\n";
}
-void PromiseReactionJobInfo::PromiseReactionJobInfoPrint(
- std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "PromiseReactionJobInfo");
- os << "\n - value: " << Brief(value());
- os << "\n - tasks: " << Brief(tasks());
- os << "\n - deferred_promise: " << Brief(deferred_promise());
- os << "\n - deferred_on_resolve: " << Brief(deferred_on_resolve());
- os << "\n - deferred_on_reject: " << Brief(deferred_on_reject());
- os << "\n - reaction context: " << Brief(context());
+void PromiseReaction::PromiseReactionPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "PromiseReaction");
+ os << "\n - next: " << Brief(next());
+ os << "\n - reject_handler: " << Brief(reject_handler());
+ os << "\n - fulfill_handler: " << Brief(fulfill_handler());
+ os << "\n - payload: " << Brief(payload());
os << "\n";
}
@@ -1616,6 +1633,58 @@ void Script::ScriptPrint(std::ostream& os) { // NOLINT
os << "\n";
}
+namespace {
+void PrintScopeInfoList(ScopeInfo* scope_info, std::ostream& os,
+ const char* list_name, int nof_internal_slots,
+ int start, int length) {
+ if (length <= 0) return;
+ int end = start + length;
+ os << "\n - " << list_name;
+ if (nof_internal_slots > 0) {
+ os << " " << start << "-" << end << " [internal slots]";
+ }
+ os << " {\n";
+ for (int i = nof_internal_slots; start < end; ++i, ++start) {
+ os << " - " << i << ": ";
+ String::cast(scope_info->get(start))->ShortPrint(os);
+ os << "\n";
+ }
+ os << " }";
+}
+} // namespace
+
+void ScopeInfo::ScopeInfoPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "ScopeInfo");
+ if (length() == 0) {
+ os << "\n - length = 0";
+ return;
+ }
+
+ os << "\n - scope type: " << scope_type();
+ os << "\n - language mode: " << language_mode();
+ os << "\n - local count: " << LocalCount();
+ os << "\n - stack slot count: " << StackSlotCount();
+ if (HasReceiver()) os << "\n - has receiver";
+ if (HasNewTarget()) os << "\n - needs new target";
+ if (HasOuterScopeInfo()) {
+ os << "\n - outer scope info: " << Brief(OuterScopeInfo());
+ }
+ if (HasFunctionName()) {
+ os << "\n - function name: ";
+ FunctionName()->ShortPrint(os);
+ }
+ os << "\n - length: " << length();
+ if (length() > 0) {
+ PrintScopeInfoList(this, os, "parameters", 0, ParameterNamesIndex(),
+ ParameterCount());
+ PrintScopeInfoList(this, os, "stack slots", 0, StackLocalNamesIndex(),
+ StackLocalCount());
+ PrintScopeInfoList(this, os, "context slots", Context::MIN_CONTEXT_SLOTS,
+ ContextLocalNamesIndex(), ContextLocalCount());
+ // TODO(neis): Print module stuff if present.
+ }
+ os << "\n";
+}
void DebugInfo::DebugInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "DebugInfo");
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index af2e3eccb3..9e80224d93 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -60,6 +60,8 @@
#include "src/objects/frame-array-inl.h"
#include "src/objects/hash-table.h"
#include "src/objects/map.h"
+#include "src/objects/microtask-inl.h"
+#include "src/objects/promise-inl.h"
#include "src/parsing/preparsed-scope-data.h"
#include "src/property-descriptor.h"
#include "src/prototype.h"
@@ -72,6 +74,7 @@
#include "src/string-stream.h"
#include "src/trap-handler/trap-handler.h"
#include "src/unicode-cache-inl.h"
+#include "src/unicode-decoder.h"
#include "src/utils-inl.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects.h"
@@ -699,39 +702,6 @@ Handle<String> Object::TypeOf(Isolate* isolate, Handle<Object> object) {
// static
-MaybeHandle<Object> Object::Multiply(Isolate* isolate, Handle<Object> lhs,
- Handle<Object> rhs) {
- if (!lhs->IsNumber() || !rhs->IsNumber()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
- }
- return isolate->factory()->NewNumber(lhs->Number() * rhs->Number());
-}
-
-
-// static
-MaybeHandle<Object> Object::Divide(Isolate* isolate, Handle<Object> lhs,
- Handle<Object> rhs) {
- if (!lhs->IsNumber() || !rhs->IsNumber()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
- }
- return isolate->factory()->NewNumber(lhs->Number() / rhs->Number());
-}
-
-
-// static
-MaybeHandle<Object> Object::Modulus(Isolate* isolate, Handle<Object> lhs,
- Handle<Object> rhs) {
- if (!lhs->IsNumber() || !rhs->IsNumber()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
- }
- return isolate->factory()->NewNumber(Modulo(lhs->Number(), rhs->Number()));
-}
-
-
-// static
MaybeHandle<Object> Object::Add(Isolate* isolate, Handle<Object> lhs,
Handle<Object> rhs) {
if (lhs->IsNumber() && rhs->IsNumber()) {
@@ -757,89 +727,6 @@ MaybeHandle<Object> Object::Add(Isolate* isolate, Handle<Object> lhs,
// static
-MaybeHandle<Object> Object::Subtract(Isolate* isolate, Handle<Object> lhs,
- Handle<Object> rhs) {
- if (!lhs->IsNumber() || !rhs->IsNumber()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
- }
- return isolate->factory()->NewNumber(lhs->Number() - rhs->Number());
-}
-
-
-// static
-MaybeHandle<Object> Object::ShiftLeft(Isolate* isolate, Handle<Object> lhs,
- Handle<Object> rhs) {
- if (!lhs->IsNumber() || !rhs->IsNumber()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
- }
- return isolate->factory()->NewNumberFromInt(NumberToInt32(*lhs)
- << (NumberToUint32(*rhs) & 0x1F));
-}
-
-
-// static
-MaybeHandle<Object> Object::ShiftRight(Isolate* isolate, Handle<Object> lhs,
- Handle<Object> rhs) {
- if (!lhs->IsNumber() || !rhs->IsNumber()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
- }
- return isolate->factory()->NewNumberFromInt(NumberToInt32(*lhs) >>
- (NumberToUint32(*rhs) & 0x1F));
-}
-
-
-// static
-MaybeHandle<Object> Object::ShiftRightLogical(Isolate* isolate,
- Handle<Object> lhs,
- Handle<Object> rhs) {
- if (!lhs->IsNumber() || !rhs->IsNumber()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
- }
- return isolate->factory()->NewNumberFromUint(NumberToUint32(*lhs) >>
- (NumberToUint32(*rhs) & 0x1F));
-}
-
-
-// static
-MaybeHandle<Object> Object::BitwiseAnd(Isolate* isolate, Handle<Object> lhs,
- Handle<Object> rhs) {
- if (!lhs->IsNumber() || !rhs->IsNumber()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
- }
- return isolate->factory()->NewNumberFromInt(NumberToInt32(*lhs) &
- NumberToInt32(*rhs));
-}
-
-
-// static
-MaybeHandle<Object> Object::BitwiseOr(Isolate* isolate, Handle<Object> lhs,
- Handle<Object> rhs) {
- if (!lhs->IsNumber() || !rhs->IsNumber()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
- }
- return isolate->factory()->NewNumberFromInt(NumberToInt32(*lhs) |
- NumberToInt32(*rhs));
-}
-
-
-// static
-MaybeHandle<Object> Object::BitwiseXor(Isolate* isolate, Handle<Object> lhs,
- Handle<Object> rhs) {
- if (!lhs->IsNumber() || !rhs->IsNumber()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
- }
- return isolate->factory()->NewNumberFromInt(NumberToInt32(*lhs) ^
- NumberToInt32(*rhs));
-}
-
-// static
MaybeHandle<Object> Object::OrdinaryHasInstance(Isolate* isolate,
Handle<Object> callable,
Handle<Object> object) {
@@ -1272,7 +1159,7 @@ Handle<Object> JSReceiver::GetDataProperty(LookupIterator* it) {
// Support calling this method without an active context, but refuse
// access to access-checked objects in that case.
if (it->isolate()->context() != nullptr && it->HasAccess()) continue;
- // Fall through.
+ V8_FALLTHROUGH;
case LookupIterator::JSPROXY:
it->NotFound();
return it->isolate()->factory()->undefined_value();
@@ -1313,24 +1200,24 @@ Handle<SharedFunctionInfo> FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(
if (current_info->IsSharedFunctionInfo()) {
return handle(SharedFunctionInfo::cast(current_info), isolate);
}
- Handle<Object> class_name(info->class_name(), isolate);
Handle<Name> name;
Handle<String> name_string;
if (maybe_name.ToHandle(&name) && name->IsString()) {
name_string = Handle<String>::cast(name);
+ } else if (info->class_name()->IsString()) {
+ name_string = handle(String::cast(info->class_name()));
} else {
- name_string = class_name->IsString() ? Handle<String>::cast(class_name)
- : isolate->factory()->empty_string();
+ name_string = isolate->factory()->empty_string();
}
Handle<Code> code = BUILTIN_CODE(isolate, HandleApiCall);
bool is_constructor;
FunctionKind function_kind;
- if (!info->remove_prototype()) {
- is_constructor = true;
- function_kind = kNormalFunction;
- } else {
+ if (info->remove_prototype()) {
is_constructor = false;
function_kind = kConciseMethod;
+ } else {
+ is_constructor = true;
+ function_kind = kNormalFunction;
}
Handle<SharedFunctionInfo> result = isolate->factory()->NewSharedFunctionInfo(
name_string, code, is_constructor, function_kind);
@@ -1339,9 +1226,6 @@ Handle<SharedFunctionInfo> FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(
}
result->set_length(info->length());
- if (class_name->IsString()) {
- result->set_instance_class_name(String::cast(*class_name));
- }
result->set_api_func_data(*info);
result->DontAdaptArguments();
DCHECK(result->IsApiFunction());
@@ -2482,7 +2366,7 @@ MaybeHandle<Object> Object::ArraySpeciesConstructor(
Handle<Object> default_species = isolate->array_function();
if (original_array->IsJSArray() &&
Handle<JSArray>::cast(original_array)->HasArrayPrototype(isolate) &&
- isolate->IsArraySpeciesLookupChainIntact()) {
+ isolate->IsSpeciesLookupChainIntact()) {
return default_species;
}
Handle<Object> constructor = isolate->factory()->undefined_value();
@@ -3106,6 +2990,7 @@ VisitorId Map::GetVisitorId(Map* map) {
case HASH_TABLE_TYPE:
case FIXED_ARRAY_TYPE:
case DESCRIPTOR_ARRAY_TYPE:
+ case SCOPE_INFO_TYPE:
return kVisitFixedArray;
case FIXED_DOUBLE_ARRAY_TYPE:
@@ -3114,6 +2999,9 @@ VisitorId Map::GetVisitorId(Map* map) {
case PROPERTY_ARRAY_TYPE:
return kVisitPropertyArray;
+ case FEEDBACK_CELL_TYPE:
+ return kVisitFeedbackCell;
+
case FEEDBACK_VECTOR_TYPE:
return kVisitFeedbackVector;
@@ -3226,6 +3114,8 @@ VisitorId Map::GetVisitorId(Map* map) {
case FIXED_INT32_ARRAY_TYPE:
case FIXED_FLOAT32_ARRAY_TYPE:
case FIXED_UINT8_CLAMPED_ARRAY_TYPE:
+ case FIXED_BIGUINT64_ARRAY_TYPE:
+ case FIXED_BIGINT64_ARRAY_TYPE:
return kVisitFixedTypedArrayBase;
case FIXED_FLOAT64_ARRAY_TYPE:
@@ -3401,6 +3291,20 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
case PROPERTY_ARRAY_TYPE:
os << "<PropertyArray[" << PropertyArray::cast(this)->length() << "]>";
break;
+ case FEEDBACK_CELL_TYPE: {
+ os << "<FeedbackCell[";
+ if (map() == heap->no_closures_cell_map()) {
+ os << "no closures";
+ } else if (map() == heap->one_closure_cell_map()) {
+ os << "one closure";
+ } else if (map() == heap->many_closures_cell_map()) {
+ os << "many closures";
+ } else {
+ os << "!!!INVALID MAP!!!";
+ }
+ os << "]>";
+ break;
+ }
case FEEDBACK_VECTOR_TYPE:
os << "<FeedbackVector[" << FeedbackVector::cast(this)->length() << "]>";
break;
@@ -3437,6 +3341,9 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
break;
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
+ case SCOPE_INFO_TYPE:
+ os << "<ScopeInfo[" << ScopeInfo::cast(this)->length() << "]>";
+ break;
case CODE_TYPE: {
Code* code = Code::cast(this);
os << "<Code " << Code::Kind2String(code->kind());
@@ -3540,6 +3447,10 @@ void Tuple3::BriefPrintDetails(std::ostream& os) {
<< Brief(value3());
}
+void CallableTask::BriefPrintDetails(std::ostream& os) {
+ os << " callable=" << Brief(callable());
+}
+
void HeapObject::Iterate(ObjectVisitor* v) { IterateFast<ObjectVisitor>(v); }
@@ -3589,20 +3500,63 @@ void HeapNumber::HeapNumberPrint(std::ostream& os) { // NOLINT
(*reinterpret_cast<const byte*>(FIELD_ADDR_CONST(p, offset)))
String* JSReceiver::class_name() {
- if (IsFunction()) {
- return GetHeap()->Function_string();
+ if (IsFunction()) return GetHeap()->Function_string();
+ if (IsJSArgumentsObject()) return GetHeap()->Arguments_string();
+ if (IsJSArray()) return GetHeap()->Array_string();
+ if (IsJSArrayBuffer()) {
+ if (JSArrayBuffer::cast(this)->is_shared()) {
+ return GetHeap()->SharedArrayBuffer_string();
+ }
+ return GetHeap()->ArrayBuffer_string();
+ }
+ if (IsJSArrayIterator()) return GetHeap()->ArrayIterator_string();
+ if (IsJSDate()) return GetHeap()->Date_string();
+ if (IsJSError()) return GetHeap()->Error_string();
+ if (IsJSGeneratorObject()) return GetHeap()->Generator_string();
+ if (IsJSMap()) return GetHeap()->Map_string();
+ if (IsJSMapIterator()) return GetHeap()->MapIterator_string();
+ if (IsJSProxy()) {
+ return map()->is_callable() ? GetHeap()->Function_string()
+ : GetHeap()->Object_string();
+ }
+ if (IsJSRegExp()) return GetHeap()->RegExp_string();
+ if (IsJSSet()) return GetHeap()->Set_string();
+ if (IsJSSetIterator()) return GetHeap()->SetIterator_string();
+ if (IsJSTypedArray()) {
+#define SWITCH_KIND(Type, type, TYPE, ctype, size) \
+ if (map()->elements_kind() == TYPE##_ELEMENTS) { \
+ return GetHeap()->Type##Array_string(); \
+ }
+ TYPED_ARRAYS(SWITCH_KIND)
+#undef SWITCH_KIND
+ }
+ if (IsJSValue()) {
+ Object* value = JSValue::cast(this)->value();
+ if (value->IsBoolean()) return GetHeap()->Boolean_string();
+ if (value->IsString()) return GetHeap()->String_string();
+ if (value->IsNumber()) return GetHeap()->Number_string();
+ if (value->IsBigInt()) return GetHeap()->BigInt_string();
+ if (value->IsSymbol()) return GetHeap()->Symbol_string();
+ if (value->IsScript()) return GetHeap()->Script_string();
+ UNREACHABLE();
}
+ if (IsJSWeakMap()) return GetHeap()->WeakMap_string();
+ if (IsJSWeakSet()) return GetHeap()->WeakSet_string();
+ if (IsJSGlobalProxy()) return GetHeap()->global_string();
+
Object* maybe_constructor = map()->GetConstructor();
if (maybe_constructor->IsJSFunction()) {
JSFunction* constructor = JSFunction::cast(maybe_constructor);
- return String::cast(constructor->shared()->instance_class_name());
- } else if (maybe_constructor->IsFunctionTemplateInfo()) {
+ if (constructor->shared()->IsApiFunction()) {
+ maybe_constructor = constructor->shared()->get_api_func_data();
+ }
+ }
+
+ if (maybe_constructor->IsFunctionTemplateInfo()) {
FunctionTemplateInfo* info = FunctionTemplateInfo::cast(maybe_constructor);
- return info->class_name()->IsString() ? String::cast(info->class_name())
- : GetHeap()->empty_string();
+ if (info->class_name()->IsString()) return String::cast(info->class_name());
}
- // If the constructor is not present, return "Object".
return GetHeap()->Object_string();
}
@@ -3612,7 +3566,8 @@ bool HeapObject::CanBeRehashed() const {
case HASH_TABLE_TYPE:
// TODO(yangguo): actually support rehashing OrderedHash{Map,Set}.
return IsNameDictionary() || IsGlobalDictionary() ||
- IsNumberDictionary() || IsStringTable() || IsWeakHashTable();
+ IsNumberDictionary() || IsSimpleNumberDictionary() ||
+ IsStringTable() || IsWeakHashTable();
case DESCRIPTOR_ARRAY_TYPE:
return true;
case TRANSITION_ARRAY_TYPE:
@@ -3634,6 +3589,8 @@ void HeapObject::RehashBasedOnMap() {
NameDictionary::cast(this)->Rehash();
} else if (IsNumberDictionary()) {
NumberDictionary::cast(this)->Rehash();
+ } else if (IsSimpleNumberDictionary()) {
+ SimpleNumberDictionary::cast(this)->Rehash();
} else if (IsGlobalDictionary()) {
GlobalDictionary::cast(this)->Rehash();
} else if (IsStringTable()) {
@@ -4885,7 +4842,7 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
if (it->HolderIsReceiverOrHiddenPrototype()) {
return SetDataProperty(it, value);
}
- // Fall through.
+ V8_FALLTHROUGH;
case LookupIterator::TRANSITION:
*found = false;
return Nothing<bool>();
@@ -4970,7 +4927,7 @@ Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
return JSObject::SetPropertyWithAccessor(&own_lookup, value,
should_throw);
}
- // Fall through.
+ V8_FALLTHROUGH;
case LookupIterator::INTEGER_INDEXED_EXOTIC:
return RedefineIncompatibleProperty(isolate, it->GetName(), value,
should_throw);
@@ -5054,17 +5011,32 @@ Maybe<bool> Object::RedefineIncompatibleProperty(Isolate* isolate,
Maybe<bool> Object::SetDataProperty(LookupIterator* it, Handle<Object> value) {
- // Proxies are handled elsewhere. Other non-JSObjects cannot have own
- // properties.
- Handle<JSObject> receiver = Handle<JSObject>::cast(it->GetReceiver());
+ DCHECK_IMPLIES(it->GetReceiver()->IsJSProxy(),
+ it->GetName()->IsPrivateField());
+ DCHECK_IMPLIES(!it->IsElement() && it->GetName()->IsPrivateField(),
+ it->state() == LookupIterator::DATA);
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(it->GetReceiver());
// Store on the holder which may be hidden behind the receiver.
DCHECK(it->HolderIsReceiverOrHiddenPrototype());
Handle<Object> to_assign = value;
// Convert the incoming value to a number for storing into typed arrays.
- if (it->IsElement() && receiver->HasFixedTypedArrayElements()) {
- if (!value->IsNumber() && !value->IsUndefined(it->isolate())) {
+ if (it->IsElement() && receiver->IsJSObject() &&
+ JSObject::cast(*receiver)->HasFixedTypedArrayElements()) {
+ ElementsKind elements_kind = JSObject::cast(*receiver)->GetElementsKind();
+ if (elements_kind == BIGINT64_ELEMENTS ||
+ elements_kind == BIGUINT64_ELEMENTS) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(it->isolate(), to_assign,
+ BigInt::FromObject(it->isolate(), value),
+ Nothing<bool>());
+ // We have to recheck the length. However, it can only change if the
+ // underlying buffer was neutered, so just check that.
+ if (Handle<JSArrayBufferView>::cast(receiver)->WasNeutered()) {
+ return Just(true);
+ // TODO(neis): According to the spec, this should throw a TypeError.
+ }
+ } else if (!value->IsNumber() && !value->IsUndefined(it->isolate())) {
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
it->isolate(), to_assign, Object::ToNumber(value), Nothing<bool>());
// We have to recheck the length. However, it can only change if the
@@ -5085,7 +5057,7 @@ Maybe<bool> Object::SetDataProperty(LookupIterator* it, Handle<Object> value) {
#if VERIFY_HEAP
if (FLAG_verify_heap) {
- receiver->JSObjectVerify();
+ receiver->HeapObjectVerify();
}
#endif
return Just(true);
@@ -5096,18 +5068,25 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
PropertyAttributes attributes,
ShouldThrow should_throw,
StoreFromKeyed store_mode) {
- if (!it->GetReceiver()->IsJSObject()) {
- if (it->GetReceiver()->IsJSProxy() && it->GetName()->IsPrivate()) {
- RETURN_FAILURE(it->isolate(), should_throw,
- NewTypeError(MessageTemplate::kProxyPrivate));
- }
+ if (!it->GetReceiver()->IsJSReceiver()) {
return CannotCreateProperty(it->isolate(), it->GetReceiver(), it->GetName(),
value, should_throw);
}
+ // Private symbols should be installed on JSProxy using
+ // JSProxy::SetPrivateSymbol.
+ if (it->GetReceiver()->IsJSProxy() && it->GetName()->IsPrivate() &&
+ !it->GetName()->IsPrivateField()) {
+ RETURN_FAILURE(it->isolate(), should_throw,
+ NewTypeError(MessageTemplate::kProxyPrivate));
+ }
+
DCHECK_NE(LookupIterator::INTEGER_INDEXED_EXOTIC, it->state());
- Handle<JSObject> receiver = it->GetStoreTarget();
+ Handle<JSReceiver> receiver = it->GetStoreTarget<JSReceiver>();
+ DCHECK_IMPLIES(receiver->IsJSProxy(), it->GetName()->IsPrivateField());
+ DCHECK_IMPLIES(receiver->IsJSProxy(),
+ it->state() == LookupIterator::NOT_FOUND);
// If the receiver is a JSGlobalProxy, store on the prototype (JSGlobalObject)
// instead. If the prototype is Null, the proxy is detached.
@@ -5141,9 +5120,10 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
}
}
- Maybe<bool> result = JSObject::AddDataElement(receiver, it->index(), value,
- attributes, should_throw);
- JSObject::ValidateElements(*receiver);
+ Handle<JSObject> receiver_obj = Handle<JSObject>::cast(receiver);
+ Maybe<bool> result = JSObject::AddDataElement(
+ receiver_obj, it->index(), value, attributes, should_throw);
+ JSObject::ValidateElements(*receiver_obj);
return result;
} else {
it->UpdateProtector();
@@ -5159,7 +5139,7 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
#if VERIFY_HEAP
if (FLAG_verify_heap) {
- receiver->JSObjectVerify();
+ receiver->HeapObjectVerify();
}
#endif
}
@@ -7260,7 +7240,6 @@ Maybe<bool> JSObject::CreateDataProperty(LookupIterator* it,
return Just(true);
}
-
// TODO(jkummerow): Consider unification with FastAsArrayLength() in
// accessors.cc.
bool PropertyKeyToArrayLength(Handle<Object> value, uint32_t* length) {
@@ -7472,8 +7451,9 @@ Maybe<bool> JSProxy::DefineOwnProperty(Isolate* isolate, Handle<JSProxy> proxy,
ShouldThrow should_throw) {
STACK_CHECK(isolate, Nothing<bool>());
if (key->IsSymbol() && Handle<Symbol>::cast(key)->IsPrivate()) {
- return SetPrivateProperty(isolate, proxy, Handle<Symbol>::cast(key), desc,
- should_throw);
+ DCHECK(!Handle<Symbol>::cast(key)->IsPrivateField());
+ return JSProxy::SetPrivateSymbol(isolate, proxy, Handle<Symbol>::cast(key),
+ desc, should_throw);
}
Handle<String> trap_name = isolate->factory()->defineProperty_string();
// 1. Assert: IsPropertyKey(P) is true.
@@ -7576,12 +7556,12 @@ Maybe<bool> JSProxy::DefineOwnProperty(Isolate* isolate, Handle<JSProxy> proxy,
return Just(true);
}
-
// static
-Maybe<bool> JSProxy::SetPrivateProperty(Isolate* isolate, Handle<JSProxy> proxy,
- Handle<Symbol> private_name,
- PropertyDescriptor* desc,
- ShouldThrow should_throw) {
+Maybe<bool> JSProxy::SetPrivateSymbol(Isolate* isolate, Handle<JSProxy> proxy,
+ Handle<Symbol> private_name,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw) {
+ DCHECK(!private_name->IsPrivateField());
// Despite the generic name, this can only add private data properties.
if (!PropertyDescriptor::IsDataDescriptor(desc) ||
desc->ToAttributes() != DONT_ENUM) {
@@ -7611,7 +7591,6 @@ Maybe<bool> JSProxy::SetPrivateProperty(Isolate* isolate, Handle<JSProxy> proxy,
return Just(true);
}
-
// static
Maybe<bool> JSReceiver::GetOwnPropertyDescriptor(Isolate* isolate,
Handle<JSReceiver> object,
@@ -8791,9 +8770,10 @@ MUST_USE_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
MaybeHandle<FixedArray> GetOwnValuesOrEntries(Isolate* isolate,
Handle<JSReceiver> object,
PropertyFilter filter,
+ bool try_fast_path,
bool get_entries) {
Handle<FixedArray> values_or_entries;
- if (filter == ENUMERABLE_STRINGS) {
+ if (try_fast_path && filter == ENUMERABLE_STRINGS) {
Maybe<bool> fast_values_or_entries = FastGetOwnValuesOrEntries(
isolate, object, get_entries, &values_or_entries);
if (fast_values_or_entries.IsNothing()) return MaybeHandle<FixedArray>();
@@ -8846,13 +8826,17 @@ MaybeHandle<FixedArray> GetOwnValuesOrEntries(Isolate* isolate,
}
MaybeHandle<FixedArray> JSReceiver::GetOwnValues(Handle<JSReceiver> object,
- PropertyFilter filter) {
- return GetOwnValuesOrEntries(object->GetIsolate(), object, filter, false);
+ PropertyFilter filter,
+ bool try_fast_path) {
+ return GetOwnValuesOrEntries(object->GetIsolate(), object, filter,
+ try_fast_path, false);
}
MaybeHandle<FixedArray> JSReceiver::GetOwnEntries(Handle<JSReceiver> object,
- PropertyFilter filter) {
- return GetOwnValuesOrEntries(object->GetIsolate(), object, filter, true);
+ PropertyFilter filter,
+ bool try_fast_path) {
+ return GetOwnValuesOrEntries(object->GetIsolate(), object, filter,
+ try_fast_path, true);
}
bool Map::DictionaryElementsInPrototypeChainOnly() {
@@ -10027,7 +10011,8 @@ Handle<Map> Map::CopyReplaceDescriptor(Handle<Map> map,
}
Handle<FixedArray> FixedArray::SetAndGrow(Handle<FixedArray> array, int index,
- Handle<Object> value) {
+ Handle<Object> value,
+ PretenureFlag pretenure) {
if (index < array->length()) {
array->set(index, *value);
return array;
@@ -10037,7 +10022,8 @@ Handle<FixedArray> FixedArray::SetAndGrow(Handle<FixedArray> array, int index,
capacity = JSObject::NewElementsCapacity(capacity);
} while (capacity <= index);
Handle<FixedArray> new_array =
- array->GetIsolate()->factory()->NewUninitializedFixedArray(capacity);
+ array->GetIsolate()->factory()->NewUninitializedFixedArray(capacity,
+ pretenure);
array->CopyTo(0, *new_array, 0, array->length());
new_array->FillWithHoles(array->length(), new_array->length());
new_array->set(index, *value);
@@ -10493,54 +10479,6 @@ SharedFunctionInfo* DeoptimizationData::GetInlinedFunction(int index) {
}
}
-int HandlerTable::LookupRange(int pc_offset, int* data_out,
- CatchPrediction* prediction_out) {
- int innermost_handler = -1;
-#ifdef DEBUG
- // Assuming that ranges are well nested, we don't need to track the innermost
- // offsets. This is just to verify that the table is actually well nested.
- int innermost_start = std::numeric_limits<int>::min();
- int innermost_end = std::numeric_limits<int>::max();
-#endif
- for (int i = 0; i < length(); i += kRangeEntrySize) {
- int start_offset = Smi::ToInt(get(i + kRangeStartIndex));
- int end_offset = Smi::ToInt(get(i + kRangeEndIndex));
- int handler_field = Smi::ToInt(get(i + kRangeHandlerIndex));
- int handler_offset = HandlerOffsetField::decode(handler_field);
- CatchPrediction prediction = HandlerPredictionField::decode(handler_field);
- int handler_data = Smi::ToInt(get(i + kRangeDataIndex));
- if (pc_offset >= start_offset && pc_offset < end_offset) {
- DCHECK_GE(start_offset, innermost_start);
- DCHECK_LT(end_offset, innermost_end);
- innermost_handler = handler_offset;
-#ifdef DEBUG
- innermost_start = start_offset;
- innermost_end = end_offset;
-#endif
- if (data_out) *data_out = handler_data;
- if (prediction_out) *prediction_out = prediction;
- }
- }
- return innermost_handler;
-}
-
-
-// TODO(turbofan): Make sure table is sorted and use binary search.
-int HandlerTable::LookupReturn(int pc_offset) {
- for (int i = 0; i < length(); i += kReturnEntrySize) {
- int return_offset = Smi::ToInt(get(i + kReturnOffsetIndex));
- int handler_field = Smi::ToInt(get(i + kReturnHandlerIndex));
- if (pc_offset == return_offset) {
- return HandlerOffsetField::decode(handler_field);
- }
- }
- return -1;
-}
-
-Handle<HandlerTable> HandlerTable::Empty(Isolate* isolate) {
- return Handle<HandlerTable>::cast(isolate->factory()->empty_fixed_array());
-}
-
#ifdef DEBUG
bool DescriptorArray::IsEqualTo(DescriptorArray* other) {
if (length() != other->length()) return false;
@@ -10560,7 +10498,7 @@ Handle<String> String::Trim(Handle<String> string, TrimMode mode) {
// Perform left trimming if requested.
int left = 0;
UnicodeCache* unicode_cache = isolate->unicode_cache();
- if (mode == kTrim || mode == kTrimLeft) {
+ if (mode == kTrim || mode == kTrimStart) {
while (left < length &&
unicode_cache->IsWhiteSpaceOrLineTerminator(string->Get(left))) {
left++;
@@ -10569,7 +10507,7 @@ Handle<String> String::Trim(Handle<String> string, TrimMode mode) {
// Perform right trimming if requested.
int right = length;
- if (mode == kTrim || mode == kTrimRight) {
+ if (mode == kTrim || mode == kTrimEnd) {
while (
right > left &&
unicode_cache->IsWhiteSpaceOrLineTerminator(string->Get(right - 1))) {
@@ -11908,24 +11846,15 @@ bool String::IsUtf8EqualTo(Vector<const char> str, bool allow_prefix_match) {
str_len > slen*static_cast<int>(unibrow::Utf8::kMaxEncodedSize))) {
return false;
}
- int i;
- size_t remaining_in_str = static_cast<size_t>(str_len);
- const uint8_t* utf8_data = reinterpret_cast<const uint8_t*>(str.start());
- for (i = 0; i < slen && remaining_in_str > 0; i++) {
- size_t cursor = 0;
- uint32_t r = unibrow::Utf8::ValueOf(utf8_data, remaining_in_str, &cursor);
- DCHECK(cursor > 0 && cursor <= remaining_in_str);
- if (r > unibrow::Utf16::kMaxNonSurrogateCharCode) {
- if (i > slen - 1) return false;
- if (Get(i++) != unibrow::Utf16::LeadSurrogate(r)) return false;
- if (Get(i) != unibrow::Utf16::TrailSurrogate(r)) return false;
- } else {
- if (Get(i) != r) return false;
- }
- utf8_data += cursor;
- remaining_in_str -= cursor;
+
+ int i = 0;
+ unibrow::Utf8Iterator it = unibrow::Utf8Iterator(str);
+ while (i < slen && !it.Done()) {
+ if (Get(i++) != *it) return false;
+ ++it;
}
- return (allow_prefix_match || i == slen) && remaining_in_str == 0;
+
+ return (allow_prefix_match || i == slen) && it.Done();
}
template <>
@@ -12094,37 +12023,31 @@ uint32_t StringHasher::ComputeUtf8Hash(Vector<const char> chars,
*utf16_length_out = vector_length;
return HashSequentialString(chars.start(), vector_length, seed);
}
+
// Start with a fake length which won't affect computation.
// It will be updated later.
StringHasher hasher(String::kMaxArrayIndexSize, seed);
- size_t remaining = static_cast<size_t>(vector_length);
- const uint8_t* stream = reinterpret_cast<const uint8_t*>(chars.start());
+ DCHECK(hasher.is_array_index_);
+
+ unibrow::Utf8Iterator it = unibrow::Utf8Iterator(chars);
int utf16_length = 0;
bool is_index = true;
- DCHECK(hasher.is_array_index_);
- while (remaining > 0) {
- size_t consumed = 0;
- uint32_t c = unibrow::Utf8::ValueOf(stream, remaining, &consumed);
- DCHECK(consumed > 0 && consumed <= remaining);
- stream += consumed;
- remaining -= consumed;
- bool is_two_characters = c > unibrow::Utf16::kMaxNonSurrogateCharCode;
- utf16_length += is_two_characters ? 2 : 1;
- // No need to keep hashing. But we do need to calculate utf16_length.
- if (utf16_length > String::kMaxHashCalcLength) continue;
- if (is_two_characters) {
- uint16_t c1 = unibrow::Utf16::LeadSurrogate(c);
- uint16_t c2 = unibrow::Utf16::TrailSurrogate(c);
- hasher.AddCharacter(c1);
- hasher.AddCharacter(c2);
- if (is_index) is_index = hasher.UpdateIndex(c1);
- if (is_index) is_index = hasher.UpdateIndex(c2);
- } else {
- hasher.AddCharacter(c);
- if (is_index) is_index = hasher.UpdateIndex(c);
- }
+
+ while (utf16_length < String::kMaxHashCalcLength && !it.Done()) {
+ utf16_length++;
+ uint16_t c = *it;
+ ++it;
+ hasher.AddCharacter(c);
+ if (is_index) is_index = hasher.UpdateIndex(c);
+ }
+
+ // Now that hashing is done, we just need to calculate utf16_length
+ while (!it.Done()) {
+ ++it;
+ utf16_length++;
}
- *utf16_length_out = static_cast<int>(utf16_length);
+
+ *utf16_length_out = utf16_length;
// Must set length here so that hash computation is correct.
hasher.length_ = utf16_length;
return hasher.GetHashField();
@@ -12283,31 +12206,21 @@ void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
}
// static
-void JSFunction::EnsureLiterals(Handle<JSFunction> function) {
- Handle<SharedFunctionInfo> shared(function->shared());
- Isolate* isolate = shared->GetIsolate();
-
- FeedbackVectorState state = function->GetFeedbackVectorState(isolate);
- switch (state) {
- case TOP_LEVEL_SCRIPT_NEEDS_VECTOR: {
- // A top level script didn't get it's literals installed.
- Handle<FeedbackVector> feedback_vector =
- FeedbackVector::New(isolate, shared);
- Handle<Cell> new_cell =
- isolate->factory()->NewOneClosureCell(feedback_vector);
- function->set_feedback_vector_cell(*new_cell);
- break;
- }
- case NEEDS_VECTOR: {
+void JSFunction::EnsureFeedbackVector(Handle<JSFunction> function) {
+ Isolate* const isolate = function->GetIsolate();
+ if (function->feedback_cell()->value()->IsUndefined(isolate)) {
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
+ if (!shared->HasAsmWasmData()) {
Handle<FeedbackVector> feedback_vector =
FeedbackVector::New(isolate, shared);
- function->feedback_vector_cell()->set_value(*feedback_vector);
- break;
+ if (function->feedback_cell() == isolate->heap()->many_closures_cell()) {
+ Handle<FeedbackCell> feedback_cell =
+ isolate->factory()->NewOneClosureCell(feedback_vector);
+ function->set_feedback_cell(*feedback_cell);
+ } else {
+ function->feedback_cell()->set_value(*feedback_vector);
+ }
}
- case HAS_VECTOR:
- case NO_VECTOR_NEEDED:
- // Nothing to do.
- break;
}
}
@@ -12410,9 +12323,7 @@ void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
Object* maybe_constructor = object->map()->GetConstructor();
if (maybe_constructor->IsJSFunction()) {
JSFunction* constructor = JSFunction::cast(maybe_constructor);
- Isolate* isolate = object->GetIsolate();
- if (!constructor->shared()->IsApiFunction() &&
- object->class_name() == isolate->heap()->Object_string()) {
+ if (!constructor->shared()->IsApiFunction()) {
Context* context = constructor->context()->native_context();
JSFunction* object_function = context->object_function();
object->map()->SetConstructor(object_function);
@@ -13139,9 +13050,6 @@ bool JSFunction::SetName(Handle<JSFunction> function, Handle<Name> name,
namespace {
-char const kNativeCodeSource[] = "function () { [native code] }";
-
-
Handle<String> NativeCodeFunctionSourceString(
Handle<SharedFunctionInfo> shared_info) {
Isolate* const isolate = shared_info->GetIsolate();
@@ -13158,7 +13066,7 @@ Handle<String> NativeCodeFunctionSourceString(
// static
Handle<String> JSBoundFunction::ToString(Handle<JSBoundFunction> function) {
Isolate* const isolate = function->GetIsolate();
- return isolate->factory()->NewStringFromAsciiChecked(kNativeCodeSource);
+ return isolate->factory()->function_native_code_string();
}
@@ -13626,7 +13534,6 @@ bool SharedFunctionInfo::HasBreakInfo() const {
if (!HasDebugInfo()) return false;
DebugInfo* info = DebugInfo::cast(debug_info());
bool has_break_info = info->HasBreakInfo();
- DCHECK_IMPLIES(has_break_info, HasBytecodeArray());
return has_break_info;
}
@@ -14020,7 +13927,7 @@ void Code::Relocate(intptr_t delta) {
for (RelocIterator it(this, RelocInfo::kApplyMask); !it.done(); it.next()) {
it.rinfo()->apply(delta);
}
- Assembler::FlushICache(GetIsolate(), instruction_start(), instruction_size());
+ Assembler::FlushICache(instruction_start(), instruction_size());
}
@@ -14061,18 +13968,18 @@ void Code::CopyFrom(const CodeDesc& desc) {
// code object
Handle<Object> p = it.rinfo()->target_object_handle(origin);
Code* code = Code::cast(*p);
- it.rinfo()->set_target_address(GetIsolate(), code->instruction_start(),
+ it.rinfo()->set_target_address(code->instruction_start(),
UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
Address p = it.rinfo()->target_runtime_entry(origin);
- it.rinfo()->set_target_runtime_entry(
- GetIsolate(), p, UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ it.rinfo()->set_target_runtime_entry(p, UPDATE_WRITE_BARRIER,
+ SKIP_ICACHE_FLUSH);
} else {
intptr_t delta = instruction_start() - desc.buffer;
it.rinfo()->apply(delta);
}
}
- Assembler::FlushICache(GetIsolate(), instruction_start(), instruction_size());
+ Assembler::FlushICache(instruction_start(), instruction_size());
}
@@ -14081,11 +13988,31 @@ SafepointEntry Code::GetSafepointEntry(Address pc) {
return table.FindEntry(pc);
}
+int Code::OffHeapInstructionSize() {
+ DCHECK(Builtins::IsOffHeapBuiltin(this));
+ InstructionStream* stream =
+ InstructionStream::TryLookupInstructionStream(GetIsolate(), this);
+ return static_cast<int>(stream->byte_length());
+}
+
+Address Code::OffHeapInstructionStart() {
+ DCHECK(Builtins::IsOffHeapBuiltin(this));
+ InstructionStream* stream =
+ InstructionStream::TryLookupInstructionStream(GetIsolate(), this);
+ return stream->bytes();
+}
+
+Address Code::OffHeapInstructionEnd() {
+ DCHECK(Builtins::IsOffHeapBuiltin(this));
+ InstructionStream* stream =
+ InstructionStream::TryLookupInstructionStream(GetIsolate(), this);
+ return stream->bytes() + stream->byte_length();
+}
namespace {
template <typename Code>
void SetStackFrameCacheCommon(Handle<Code> code,
- Handle<NumberDictionary> cache) {
+ Handle<SimpleNumberDictionary> cache) {
Handle<Object> maybe_table(code->source_position_table(), code->GetIsolate());
if (maybe_table->IsSourcePositionTableWithFrameCache()) {
Handle<SourcePositionTableWithFrameCache>::cast(maybe_table)
@@ -14103,7 +14030,7 @@ void SetStackFrameCacheCommon(Handle<Code> code,
// static
void AbstractCode::SetStackFrameCache(Handle<AbstractCode> abstract_code,
- Handle<NumberDictionary> cache) {
+ Handle<SimpleNumberDictionary> cache) {
if (abstract_code->IsCode()) {
SetStackFrameCacheCommon(handle(abstract_code->GetCode()), cache);
} else {
@@ -14161,7 +14088,7 @@ int AbstractCode::SourceStatementPosition(int offset) {
}
void JSFunction::ClearTypeFeedbackInfo() {
- if (feedback_vector_cell()->value()->IsFeedbackVector()) {
+ if (feedback_cell()->value()->IsFeedbackVector()) {
FeedbackVector* vector = feedback_vector();
Isolate* isolate = GetIsolate();
if (vector->ClearSlots(isolate)) {
@@ -14285,30 +14212,6 @@ Code* Code::OptimizedCodeIterator::Next() {
return code;
}
-#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
-
-const char* Code::ICState2String(InlineCacheState state) {
- switch (state) {
- case UNINITIALIZED:
- return "UNINITIALIZED";
- case PREMONOMORPHIC:
- return "PREMONOMORPHIC";
- case MONOMORPHIC:
- return "MONOMORPHIC";
- case RECOMPUTE_HANDLER:
- return "RECOMPUTE_HANDLER";
- case POLYMORPHIC:
- return "POLYMORPHIC";
- case MEGAMORPHIC:
- return "MEGAMORPHIC";
- case GENERIC:
- return "GENERIC";
- }
- UNREACHABLE();
-}
-
-#endif // defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
-
#ifdef ENABLE_DISASSEMBLER
namespace {
@@ -14537,34 +14440,6 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
}
-void HandlerTable::HandlerTableRangePrint(std::ostream& os) {
- os << " from to hdlr\n";
- for (int i = 0; i < length(); i += kRangeEntrySize) {
- int pc_start = Smi::ToInt(get(i + kRangeStartIndex));
- int pc_end = Smi::ToInt(get(i + kRangeEndIndex));
- int handler_field = Smi::ToInt(get(i + kRangeHandlerIndex));
- int handler_offset = HandlerOffsetField::decode(handler_field);
- CatchPrediction prediction = HandlerPredictionField::decode(handler_field);
- int data = Smi::ToInt(get(i + kRangeDataIndex));
- os << " (" << std::setw(4) << pc_start << "," << std::setw(4) << pc_end
- << ") -> " << std::setw(4) << handler_offset
- << " (prediction=" << prediction << ", data=" << data << ")\n";
- }
-}
-
-
-void HandlerTable::HandlerTableReturnPrint(std::ostream& os) {
- os << " off hdlr (c)\n";
- for (int i = 0; i < length(); i += kReturnEntrySize) {
- int pc_offset = Smi::ToInt(get(i + kReturnOffsetIndex));
- int handler_field = Smi::ToInt(get(i + kReturnHandlerIndex));
- int handler_offset = HandlerOffsetField::decode(handler_field);
- CatchPrediction prediction = HandlerPredictionField::decode(handler_field);
- os << " " << std::setw(4) << pc_offset << " -> " << std::setw(4)
- << handler_offset << " (prediction=" << prediction << ")\n";
- }
-}
-
void Code::Disassemble(const char* name, std::ostream& os, void* current_pc) {
os << "kind = " << Kind2String(kind()) << "\n";
if (is_stub()) {
@@ -14668,10 +14543,11 @@ void Code::Disassemble(const char* name, std::ostream& os, void* current_pc) {
os << "\n";
}
- if (handler_table()->length() > 0) {
- os << "Handler Table (size = " << handler_table()->Size() << ")\n";
+ if (handler_table_offset() > 0) {
+ HandlerTable table(this);
+ os << "Handler Table (size = " << table.NumberOfReturnEntries() << ")\n";
if (kind() == OPTIMIZED_FUNCTION) {
- HandlerTable::cast(handler_table())->HandlerTableReturnPrint(os);
+ table.HandlerTableReturnPrint(os);
}
os << "\n";
}
@@ -14744,10 +14620,11 @@ void BytecodeArray::Disassemble(std::ostream& os) {
}
#endif
- os << "Handler Table (size = " << handler_table()->Size() << ")\n";
+ os << "Handler Table (size = " << handler_table()->length() << ")\n";
#ifdef ENABLE_DISASSEMBLER
if (handler_table()->length() > 0) {
- HandlerTable::cast(handler_table())->HandlerTableRangePrint(os);
+ HandlerTable table(this);
+ table.HandlerTableRangePrint(os);
}
#endif
}
@@ -15720,7 +15597,7 @@ int JSObject::GetFastElementsUsage() {
: store->length();
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
store = SloppyArgumentsElements::cast(store)->arguments();
- // Fall through.
+ V8_FALLTHROUGH;
case HOLEY_SMI_ELEMENTS:
case HOLEY_ELEMENTS:
case FAST_STRING_WRAPPER_ELEMENTS:
@@ -15938,6 +15815,11 @@ v8::Promise::PromiseState JSPromise::status() const {
return static_cast<v8::Promise::PromiseState>(value);
}
+void JSPromise::set_status(Promise::PromiseState status) {
+ int value = flags() & ~kStatusMask;
+ set_flags(value | status);
+}
+
// static
const char* JSPromise::Status(v8::Promise::PromiseState status) {
switch (status) {
@@ -15951,6 +15833,217 @@ const char* JSPromise::Status(v8::Promise::PromiseState status) {
UNREACHABLE();
}
+// static
+Handle<Object> JSPromise::Fulfill(Handle<JSPromise> promise,
+ Handle<Object> value) {
+ Isolate* const isolate = promise->GetIsolate();
+
+ // 1. Assert: The value of promise.[[PromiseState]] is "pending".
+ DCHECK_EQ(Promise::kPending, promise->status());
+
+ // 2. Let reactions be promise.[[PromiseFulfillReactions]].
+ Handle<Object> reactions(promise->reactions(), isolate);
+
+ // 3. Set promise.[[PromiseResult]] to value.
+ // 4. Set promise.[[PromiseFulfillReactions]] to undefined.
+ // 5. Set promise.[[PromiseRejectReactions]] to undefined.
+ promise->set_reactions_or_result(*value);
+
+ // 6. Set promise.[[PromiseState]] to "fulfilled".
+ promise->set_status(Promise::kFulfilled);
+
+ // 7. Return TriggerPromiseReactions(reactions, value).
+ return TriggerPromiseReactions(isolate, reactions, value,
+ PromiseReaction::kFulfill);
+}
+
+// static
+Handle<Object> JSPromise::Reject(Handle<JSPromise> promise,
+ Handle<Object> reason, bool debug_event) {
+ Isolate* const isolate = promise->GetIsolate();
+
+ if (debug_event) isolate->debug()->OnPromiseReject(promise, reason);
+ isolate->RunPromiseHook(PromiseHookType::kResolve, promise,
+ isolate->factory()->undefined_value());
+
+ // 1. Assert: The value of promise.[[PromiseState]] is "pending".
+ DCHECK_EQ(Promise::kPending, promise->status());
+
+ // 2. Let reactions be promise.[[PromiseRejectReactions]].
+ Handle<Object> reactions(promise->reactions(), isolate);
+
+ // 3. Set promise.[[PromiseResult]] to reason.
+ // 4. Set promise.[[PromiseFulfillReactions]] to undefined.
+ // 5. Set promise.[[PromiseRejectReactions]] to undefined.
+ promise->set_reactions_or_result(*reason);
+
+ // 6. Set promise.[[PromiseState]] to "rejected".
+ promise->set_status(Promise::kRejected);
+
+ // 7. If promise.[[PromiseIsHandled]] is false, perform
+ // HostPromiseRejectionTracker(promise, "reject").
+ if (!promise->has_handler()) {
+ isolate->ReportPromiseReject(promise, reason, kPromiseRejectWithNoHandler);
+ }
+
+ // 8. Return TriggerPromiseReactions(reactions, reason).
+ return TriggerPromiseReactions(isolate, reactions, reason,
+ PromiseReaction::kReject);
+}
+
+// static
+MaybeHandle<Object> JSPromise::Resolve(Handle<JSPromise> promise,
+ Handle<Object> resolution) {
+ Isolate* const isolate = promise->GetIsolate();
+
+ isolate->RunPromiseHook(PromiseHookType::kResolve, promise,
+ isolate->factory()->undefined_value());
+
+ // 6. If SameValue(resolution, promise) is true, then
+ if (promise.is_identical_to(resolution)) {
+ // a. Let selfResolutionError be a newly created TypeError object.
+ Handle<Object> self_resolution_error = isolate->factory()->NewTypeError(
+ MessageTemplate::kPromiseCyclic, resolution);
+ // b. Return RejectPromise(promise, selfResolutionError).
+ return Reject(promise, self_resolution_error);
+ }
+
+ // 7. If Type(resolution) is not Object, then
+ if (!resolution->IsJSReceiver()) {
+ // a. Return FulfillPromise(promise, resolution).
+ return Fulfill(promise, resolution);
+ }
+
+ // 8. Let then be Get(resolution, "then").
+ MaybeHandle<Object> then;
+ if (isolate->IsPromiseThenLookupChainIntact(
+ Handle<JSReceiver>::cast(resolution))) {
+ // We can skip the "then" lookup on {resolution} if its [[Prototype]]
+ // is the (initial) Promise.prototype and the Promise#then protector
+ // is intact, as that guards the lookup path for the "then" property
+ // on JSPromise instances which have the (initial) %PromisePrototype%.
+ then = isolate->promise_then();
+ } else {
+ then = JSReceiver::GetProperty(Handle<JSReceiver>::cast(resolution),
+ isolate->factory()->then_string());
+ }
+
+ // 9. If then is an abrupt completion, then
+ Handle<Object> then_action;
+ if (!then.ToHandle(&then_action)) {
+ // a. Return RejectPromise(promise, then.[[Value]]).
+ Handle<Object> reason(isolate->pending_exception(), isolate);
+ isolate->clear_pending_exception();
+ return Reject(promise, reason, false);
+ }
+
+ // 10. Let thenAction be then.[[Value]].
+ // 11. If IsCallable(thenAction) is false, then
+ if (!then_action->IsCallable()) {
+ // a. Return FulfillPromise(promise, resolution).
+ return Fulfill(promise, resolution);
+ }
+
+ // 12. Perform EnqueueJob("PromiseJobs", PromiseResolveThenableJob,
+ // «promise, resolution, thenAction»).
+ Handle<PromiseResolveThenableJobTask> task =
+ isolate->factory()->NewPromiseResolveThenableJobTask(
+ promise, Handle<JSReceiver>::cast(then_action),
+ Handle<JSReceiver>::cast(resolution), isolate->native_context());
+ if (isolate->debug()->is_active() && resolution->IsJSPromise()) {
+ // Mark the dependency of the new {promise} on the {resolution}.
+ Object::SetProperty(resolution,
+ isolate->factory()->promise_handled_by_symbol(),
+ promise, LanguageMode::kStrict)
+ .Check();
+ }
+ isolate->EnqueueMicrotask(task);
+
+ // 13. Return undefined.
+ return isolate->factory()->undefined_value();
+}
+
+// static
+MaybeHandle<JSPromise> JSPromise::From(Handle<HeapObject> object) {
+ Isolate* const isolate = object->GetIsolate();
+ if (object->IsJSPromise()) {
+ return Handle<JSPromise>::cast(object);
+ } else if (object->IsPromiseCapability()) {
+ Handle<PromiseCapability> capability =
+ Handle<PromiseCapability>::cast(object);
+ if (capability->promise()->IsJSPromise()) {
+ return handle(JSPromise::cast(capability->promise()), isolate);
+ }
+ } else if (object->IsJSGeneratorObject()) {
+ Handle<JSGeneratorObject> generator =
+ Handle<JSGeneratorObject>::cast(object);
+ Handle<Object> handled_by = JSObject::GetDataProperty(
+ generator, isolate->factory()->generator_outer_promise_symbol());
+ if (handled_by->IsJSPromise()) return Handle<JSPromise>::cast(handled_by);
+ }
+ return MaybeHandle<JSPromise>();
+}
+
+// static
+Handle<Object> JSPromise::TriggerPromiseReactions(Isolate* isolate,
+ Handle<Object> reactions,
+ Handle<Object> argument,
+ PromiseReaction::Type type) {
+ DCHECK(reactions->IsSmi() || reactions->IsPromiseReaction());
+
+ // We need to reverse the {reactions} here, since we record them
+ // on the JSPromise in the reverse order.
+ {
+ DisallowHeapAllocation no_gc;
+ Object* current = *reactions;
+ Object* reversed = Smi::kZero;
+ while (!current->IsSmi()) {
+ Object* next = PromiseReaction::cast(current)->next();
+ PromiseReaction::cast(current)->set_next(reversed);
+ reversed = current;
+ current = next;
+ }
+ reactions = handle(reversed, isolate);
+ }
+
+ // Morph the {reactions} into PromiseReactionJobTasks
+ // and push them onto the microtask queue.
+ while (!reactions->IsSmi()) {
+ Handle<HeapObject> task = Handle<HeapObject>::cast(reactions);
+ Handle<PromiseReaction> reaction = Handle<PromiseReaction>::cast(task);
+ reactions = handle(reaction->next(), isolate);
+
+ STATIC_ASSERT(PromiseReaction::kSize == PromiseReactionJobTask::kSize);
+ if (type == PromiseReaction::kFulfill) {
+ task->synchronized_set_map(
+ isolate->heap()->promise_fulfill_reaction_job_task_map());
+ Handle<PromiseFulfillReactionJobTask>::cast(task)->set_argument(
+ *argument);
+ Handle<PromiseFulfillReactionJobTask>::cast(task)->set_context(
+ *isolate->native_context());
+ STATIC_ASSERT(PromiseReaction::kFulfillHandlerOffset ==
+ PromiseFulfillReactionJobTask::kHandlerOffset);
+ STATIC_ASSERT(PromiseReaction::kPayloadOffset ==
+ PromiseFulfillReactionJobTask::kPayloadOffset);
+ } else {
+ DisallowHeapAllocation no_gc;
+ HeapObject* handler = reaction->reject_handler();
+ task->synchronized_set_map(
+ isolate->heap()->promise_reject_reaction_job_task_map());
+ Handle<PromiseRejectReactionJobTask>::cast(task)->set_argument(*argument);
+ Handle<PromiseRejectReactionJobTask>::cast(task)->set_context(
+ *isolate->native_context());
+ Handle<PromiseRejectReactionJobTask>::cast(task)->set_handler(handler);
+ STATIC_ASSERT(PromiseReaction::kPayloadOffset ==
+ PromiseRejectReactionJobTask::kPayloadOffset);
+ }
+
+ isolate->EnqueueMicrotask(Handle<PromiseReactionJobTask>::cast(task));
+ }
+
+ return isolate->factory()->undefined_value();
+}
+
namespace {
JSRegExp::Flags RegExpFlagsFromString(Handle<String> flags, bool* success) {
@@ -16466,8 +16559,6 @@ template class HashTable<ObjectHashTable, ObjectHashTableShape>;
template class HashTable<WeakHashTable, WeakHashTableShape>;
-template class HashTable<TemplateMap, TemplateMapShape>;
-
template class Dictionary<NameDictionary, NameDictionaryShape>;
template class Dictionary<GlobalDictionary, GlobalDictionaryShape>;
@@ -16478,6 +16569,12 @@ template class EXPORT_TEMPLATE_DEFINE(
template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
Dictionary<NumberDictionary, NumberDictionaryShape>;
+template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ HashTable<SimpleNumberDictionary, SimpleNumberDictionaryShape>;
+
+template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Dictionary<SimpleNumberDictionary, SimpleNumberDictionaryShape>;
+
template Handle<NameDictionary>
BaseNameDictionary<NameDictionary, NameDictionaryShape>::New(
Isolate*, int n, PretenureFlag pretenure, MinimumCapacity capacity_option);
@@ -16490,6 +16587,11 @@ template Handle<NumberDictionary>
Dictionary<NumberDictionary, NumberDictionaryShape>::AtPut(
Handle<NumberDictionary>, uint32_t, Handle<Object>, PropertyDetails);
+template Handle<SimpleNumberDictionary>
+ Dictionary<SimpleNumberDictionary, SimpleNumberDictionaryShape>::AtPut(
+ Handle<SimpleNumberDictionary>, uint32_t, Handle<Object>,
+ PropertyDetails);
+
template Object* Dictionary<
NumberDictionary, NumberDictionaryShape>::SlowReverseLookup(Object* value);
@@ -16504,6 +16606,10 @@ template Handle<NumberDictionary>
Dictionary<NumberDictionary, NumberDictionaryShape>::DeleteEntry(
Handle<NumberDictionary>, int);
+template Handle<SimpleNumberDictionary>
+Dictionary<SimpleNumberDictionary, SimpleNumberDictionaryShape>::DeleteEntry(
+ Handle<SimpleNumberDictionary>, int);
+
template Handle<NameDictionary>
HashTable<NameDictionary, NameDictionaryShape>::New(Isolate*, int,
PretenureFlag,
@@ -16537,6 +16643,11 @@ template Handle<NameDictionary>
BaseNameDictionary<NameDictionary, NameDictionaryShape>::EnsureCapacity(
Handle<NameDictionary>, int);
+template Handle<SimpleNumberDictionary>
+Dictionary<SimpleNumberDictionary, SimpleNumberDictionaryShape>::Add(
+ Handle<SimpleNumberDictionary>, uint32_t, Handle<Object>, PropertyDetails,
+ int*);
+
template int Dictionary<GlobalDictionary,
GlobalDictionaryShape>::NumberOfEnumerableProperties();
@@ -16692,63 +16803,6 @@ size_t JSTypedArray::element_size() {
}
}
-// static
-MaybeHandle<JSTypedArray> JSTypedArray::Create(Isolate* isolate,
- Handle<Object> default_ctor,
- int argc, Handle<Object>* argv,
- const char* method_name) {
- // 1. Let newTypedArray be ? Construct(constructor, argumentList).
- Handle<Object> new_obj;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, new_obj,
- Execution::New(isolate, default_ctor, argc, argv),
- JSTypedArray);
-
- // 2. Perform ? ValidateTypedArray(newTypedArray).
- Handle<JSTypedArray> new_array;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, new_array, JSTypedArray::Validate(isolate, new_obj, method_name),
- JSTypedArray);
-
- // 3. If argumentList is a List of a single Number, then
- // If newTypedArray.[[ArrayLength]] < size, throw a TypeError exception.
- DCHECK_IMPLIES(argc == 1, argv[0]->IsSmi());
- if (argc == 1 && new_array->length_value() < argv[0]->Number()) {
- const MessageTemplate::Template message =
- MessageTemplate::kTypedArrayTooShort;
- THROW_NEW_ERROR(isolate, NewTypeError(message), JSTypedArray);
- }
-
- // 4. Return newTypedArray.
- return new_array;
-}
-
-// static
-MaybeHandle<JSTypedArray> JSTypedArray::SpeciesCreate(
- Isolate* isolate, Handle<JSTypedArray> exemplar, int argc,
- Handle<Object>* argv, const char* method_name) {
- // 1. Assert: exemplar is an Object that has a [[TypedArrayName]] internal
- // slot.
- DCHECK(exemplar->IsJSTypedArray());
-
- // 2. Let defaultConstructor be the intrinsic object listed in column one of
- // Table 51 for exemplar.[[TypedArrayName]].
- Handle<JSFunction> default_ctor =
- JSTypedArray::DefaultConstructor(isolate, exemplar);
-
- // 3. Let constructor be ? SpeciesConstructor(exemplar, defaultConstructor).
- Handle<Object> ctor = default_ctor;
- if (!exemplar->HasJSTypedArrayPrototype(isolate) ||
- !isolate->IsArraySpeciesLookupChainIntact()) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, ctor,
- Object::SpeciesConstructor(isolate, exemplar, default_ctor),
- JSTypedArray);
- }
-
- // 4. Return ? TypedArrayCreate(constructor, argumentList).
- return Create(isolate, ctor, argc, argv, method_name);
-}
-
void JSGlobalObject::InvalidatePropertyCell(Handle<JSGlobalObject> global,
Handle<Name> name) {
// Regardless of whether the property is there or not invalidate
@@ -17235,8 +17289,9 @@ int SearchLiteralsMapEntry(CompilationCacheTable* cache, int cache_entry,
return -1;
}
-void AddToLiteralsMap(Handle<CompilationCacheTable> cache, int cache_entry,
- Handle<Context> native_context, Handle<Cell> literals) {
+void AddToFeedbackCellsMap(Handle<CompilationCacheTable> cache, int cache_entry,
+ Handle<Context> native_context,
+ Handle<FeedbackCell> feedback_cell) {
Isolate* isolate = native_context->GetIsolate();
DCHECK(native_context->IsNativeContext());
STATIC_ASSERT(kLiteralEntryLength == 2);
@@ -17255,7 +17310,7 @@ void AddToLiteralsMap(Handle<CompilationCacheTable> cache, int cache_entry,
if (entry >= 0) {
// Just set the code of the entry.
Handle<WeakCell> literals_cell =
- isolate->factory()->NewWeakCell(literals);
+ isolate->factory()->NewWeakCell(feedback_cell);
old_literals_map->set(entry + kLiteralLiteralsOffset, *literals_cell);
return;
}
@@ -17280,7 +17335,8 @@ void AddToLiteralsMap(Handle<CompilationCacheTable> cache, int cache_entry,
}
}
- Handle<WeakCell> literals_cell = isolate->factory()->NewWeakCell(literals);
+ Handle<WeakCell> literals_cell =
+ isolate->factory()->NewWeakCell(feedback_cell);
WeakCell* context_cell = native_context->self_weak_cell();
new_literals_map->set(entry + kLiteralContextOffset, context_cell);
@@ -17292,7 +17348,7 @@ void AddToLiteralsMap(Handle<CompilationCacheTable> cache, int cache_entry,
WeakCell::cast(new_literals_map->get(i + kLiteralContextOffset));
DCHECK(cell->cleared() || cell->value()->IsNativeContext());
cell = WeakCell::cast(new_literals_map->get(i + kLiteralLiteralsOffset));
- DCHECK(cell->cleared() || (cell->value()->IsCell()));
+ DCHECK(cell->cleared() || (cell->value()->IsFeedbackCell()));
}
#endif
@@ -17302,9 +17358,9 @@ void AddToLiteralsMap(Handle<CompilationCacheTable> cache, int cache_entry,
}
}
-Cell* SearchLiteralsMap(CompilationCacheTable* cache, int cache_entry,
- Context* native_context) {
- Cell* result = nullptr;
+FeedbackCell* SearchLiteralsMap(CompilationCacheTable* cache, int cache_entry,
+ Context* native_context) {
+ FeedbackCell* result = nullptr;
int entry = SearchLiteralsMapEntry(cache, cache_entry, native_context);
if (entry >= 0) {
FixedArray* literals_map = FixedArray::cast(cache->get(cache_entry));
@@ -17312,37 +17368,33 @@ Cell* SearchLiteralsMap(CompilationCacheTable* cache, int cache_entry,
WeakCell* cell =
WeakCell::cast(literals_map->get(entry + kLiteralLiteralsOffset));
- result = cell->cleared() ? nullptr : Cell::cast(cell->value());
+ result = cell->cleared() ? nullptr : FeedbackCell::cast(cell->value());
}
- DCHECK(result == nullptr || result->IsCell());
+ DCHECK(result == nullptr || result->IsFeedbackCell());
return result;
}
} // namespace
-InfoVectorPair CompilationCacheTable::LookupScript(Handle<String> src,
- Handle<Context> context,
- LanguageMode language_mode) {
- InfoVectorPair empty_result;
+MaybeHandle<SharedFunctionInfo> CompilationCacheTable::LookupScript(
+ Handle<String> src, Handle<Context> context, LanguageMode language_mode) {
Handle<SharedFunctionInfo> shared(context->closure()->shared());
StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
int entry = FindEntry(&key);
- if (entry == kNotFound) return empty_result;
+ if (entry == kNotFound) return MaybeHandle<SharedFunctionInfo>();
int index = EntryToIndex(entry);
- if (!get(index)->IsFixedArray()) return empty_result;
+ if (!get(index)->IsFixedArray()) return MaybeHandle<SharedFunctionInfo>();
Object* obj = get(index + 1);
if (obj->IsSharedFunctionInfo()) {
- Cell* literals =
- SearchLiteralsMap(this, index + 2, context->native_context());
- return InfoVectorPair(SharedFunctionInfo::cast(obj), literals);
+ return handle(SharedFunctionInfo::cast(obj));
}
- return empty_result;
+ return MaybeHandle<SharedFunctionInfo>();
}
-InfoVectorPair CompilationCacheTable::LookupEval(
+InfoCellPair CompilationCacheTable::LookupEval(
Handle<String> src, Handle<SharedFunctionInfo> outer_info,
Handle<Context> native_context, LanguageMode language_mode, int position) {
- InfoVectorPair empty_result;
+ InfoCellPair empty_result;
StringSharedKey key(src, outer_info, language_mode, position);
int entry = FindEntry(&key);
if (entry == kNotFound) return empty_result;
@@ -17350,9 +17402,9 @@ InfoVectorPair CompilationCacheTable::LookupEval(
if (!get(index)->IsFixedArray()) return empty_result;
Object* obj = get(EntryToIndex(entry) + 1);
if (obj->IsSharedFunctionInfo()) {
- Cell* literals =
+ FeedbackCell* feedback_cell =
SearchLiteralsMap(this, EntryToIndex(entry) + 2, *native_context);
- return InfoVectorPair(SharedFunctionInfo::cast(obj), literals);
+ return InfoCellPair(SharedFunctionInfo::cast(obj), feedback_cell);
}
return empty_result;
}
@@ -17386,7 +17438,7 @@ Handle<CompilationCacheTable> CompilationCacheTable::Put(
Handle<CompilationCacheTable> CompilationCacheTable::PutScript(
Handle<CompilationCacheTable> cache, Handle<String> src,
Handle<Context> context, LanguageMode language_mode,
- Handle<SharedFunctionInfo> value, Handle<Cell> literals) {
+ Handle<SharedFunctionInfo> value) {
Isolate* isolate = cache->GetIsolate();
Handle<SharedFunctionInfo> shared(context->closure()->shared());
Handle<Context> native_context(context->native_context());
@@ -17396,7 +17448,6 @@ Handle<CompilationCacheTable> CompilationCacheTable::PutScript(
int entry = cache->FindInsertionEntry(key.Hash());
cache->set(EntryToIndex(entry), *k);
cache->set(EntryToIndex(entry) + 1, *value);
- AddToLiteralsMap(cache, EntryToIndex(entry) + 2, native_context, literals);
cache->ElementAdded();
return cache;
}
@@ -17404,7 +17455,8 @@ Handle<CompilationCacheTable> CompilationCacheTable::PutScript(
Handle<CompilationCacheTable> CompilationCacheTable::PutEval(
Handle<CompilationCacheTable> cache, Handle<String> src,
Handle<SharedFunctionInfo> outer_info, Handle<SharedFunctionInfo> value,
- Handle<Context> native_context, Handle<Cell> literals, int position) {
+ Handle<Context> native_context, Handle<FeedbackCell> feedback_cell,
+ int position) {
Isolate* isolate = cache->GetIsolate();
StringSharedKey key(src, outer_info, value->language_mode(), position);
{
@@ -17413,11 +17465,11 @@ Handle<CompilationCacheTable> CompilationCacheTable::PutEval(
if (entry != kNotFound) {
cache->set(EntryToIndex(entry), *k);
cache->set(EntryToIndex(entry) + 1, *value);
- // AddToLiteralsMap may allocate a new sub-array to live in the entry,
- // but it won't change the cache array. Therefore EntryToIndex and
- // entry remains correct.
- AddToLiteralsMap(cache, EntryToIndex(entry) + 2, native_context,
- literals);
+ // AddToFeedbackCellsMap may allocate a new sub-array to live in the
+ // entry, but it won't change the cache array. Therefore EntryToIndex
+ // and entry remain correct.
+ AddToFeedbackCellsMap(cache, EntryToIndex(entry) + 2, native_context,
+ feedback_cell);
return cache;
}
}
@@ -17621,6 +17673,13 @@ Handle<Derived> Dictionary<Derived, Shape>::Add(Handle<Derived> dictionary,
return dictionary;
}
+// static
+Handle<SimpleNumberDictionary> SimpleNumberDictionary::Set(
+ Handle<SimpleNumberDictionary> dictionary, uint32_t key,
+ Handle<Object> value) {
+ return AtPut(dictionary, key, value, PropertyDetails::Empty());
+}
+
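SimpleNumberDictionary::Set above is just AtPut with empty PropertyDetails, i.e. a plain uint32-keyed map that stores only values. A hedged sketch of that shape using a standard container (illustrative only, not the V8 API):

#include <cstdint>
#include <iostream>
#include <unordered_map>

int main() {
  // Value-only entries keyed by uint32, no per-entry PropertyDetails.
  std::unordered_map<uint32_t, int> dict;
  dict[42] = 7;                      // analogous to Set(dictionary, 42, value)
  std::cout << dict.at(42) << "\n";  // prints 7
}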
bool NumberDictionary::HasComplexElements() {
if (!requires_slow_elements()) return false;
Isolate* isolate = this->GetIsolate();
@@ -19004,10 +19063,11 @@ void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) {
// actually a buffer we are tracking.
isolate->wasm_engine()->allocation_tracker()->ReleaseAddressSpace(
allocation.length);
+ CHECK(FreePages(allocation.allocation_base, allocation.length));
+ } else {
+ isolate->array_buffer_allocator()->Free(allocation.allocation_base,
+ allocation.length);
}
-
- isolate->array_buffer_allocator()->Free(allocation.allocation_base,
- allocation.length, allocation.mode);
}
void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
@@ -19105,6 +19165,10 @@ Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(typed_array->buffer()),
isolate);
+ // This code does not know how to materialize from a buffer with guard
+ // regions.
+ DCHECK(!buffer->has_guard_region());
+
void* backing_store =
isolate->array_buffer_allocator()->AllocateUninitialized(
fixed_typed_array->DataSize());
@@ -19139,7 +19203,8 @@ Handle<JSArrayBuffer> JSTypedArray::GetBuffer() {
Handle<JSArrayBuffer> array_buffer(JSArrayBuffer::cast(buffer()),
GetIsolate());
if (array_buffer->was_neutered() ||
- array_buffer->backing_store() != nullptr) {
+ array_buffer->backing_store() != nullptr ||
+ array_buffer->has_guard_region()) {
return array_buffer;
}
Handle<JSTypedArray> self(this);
@@ -19218,12 +19283,12 @@ PropertyCellType PropertyCell::UpdatedType(Handle<PropertyCell> cell,
return PropertyCellType::kConstant;
case PropertyCellType::kConstant:
if (*value == cell->value()) return PropertyCellType::kConstant;
- // Fall through.
+ V8_FALLTHROUGH;
case PropertyCellType::kConstantType:
if (RemainsConstantType(cell, value)) {
return PropertyCellType::kConstantType;
}
- // Fall through.
+ V8_FALLTHROUGH;
case PropertyCellType::kMutable:
return PropertyCellType::kMutable;
}
@@ -19380,7 +19445,7 @@ ElementsKind JSArrayIterator::ElementsKindForInstanceType(InstanceType type) {
DCHECK_LE(type, LAST_ARRAY_ITERATOR_TYPE);
}
- if (type <= JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE) {
+ if (type <= JS_BIGINT64_ARRAY_VALUE_ITERATOR_TYPE) {
kind =
static_cast<ElementsKind>(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND +
(type - FIRST_ARRAY_VALUE_ITERATOR_TYPE));
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index c4e3d972e1..a9da77fce3 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -140,8 +140,7 @@
// - SharedFunctionInfo
// - Struct
// - AccessorInfo
-// - PromiseResolveThenableJobInfo
-// - PromiseReactionJobInfo
+// - PromiseReaction
// - PromiseCapability
// - AccessorPair
// - AccessCheckInfo
@@ -159,10 +158,18 @@
// - SourcePositionTableWithFrameCache
// - CodeCache
// - PrototypeInfo
+// - Microtask
+// - CallbackTask
+// - CallableTask
+// - PromiseReactionJobTask
+// - PromiseFulfillReactionJobTask
+// - PromiseRejectReactionJobTask
+// - PromiseResolveThenableJobTask
// - Module
// - ModuleInfoEntry
// - PreParsedScopeData
// - WeakCell
+// - FeedbackCell
// - FeedbackVector
//
// Formats of Object*:
@@ -184,7 +191,7 @@ enum KeyedAccessStoreMode {
STANDARD_STORE,
STORE_TRANSITION_TO_OBJECT,
STORE_TRANSITION_TO_DOUBLE,
- STORE_AND_GROW_NO_TRANSITION,
+ STORE_AND_GROW_NO_TRANSITION_HANDLE_COW,
STORE_AND_GROW_TRANSITION_TO_OBJECT,
STORE_AND_GROW_TRANSITION_TO_DOUBLE,
STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS,
@@ -204,21 +211,25 @@ static inline bool IsTransitionStoreMode(KeyedAccessStoreMode store_mode) {
store_mode == STORE_AND_GROW_TRANSITION_TO_DOUBLE;
}
+static inline bool IsCOWHandlingStoreMode(KeyedAccessStoreMode store_mode) {
+ return store_mode == STORE_NO_TRANSITION_HANDLE_COW ||
+ store_mode == STORE_AND_GROW_NO_TRANSITION_HANDLE_COW;
+}
static inline KeyedAccessStoreMode GetNonTransitioningStoreMode(
KeyedAccessStoreMode store_mode) {
if (store_mode >= STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
return store_mode;
}
- if (store_mode >= STORE_AND_GROW_NO_TRANSITION) {
- return STORE_AND_GROW_NO_TRANSITION;
+ if (store_mode >= STORE_AND_GROW_NO_TRANSITION_HANDLE_COW) {
+ return STORE_AND_GROW_NO_TRANSITION_HANDLE_COW;
}
return STANDARD_STORE;
}
static inline bool IsGrowStoreMode(KeyedAccessStoreMode store_mode) {
- return store_mode >= STORE_AND_GROW_NO_TRANSITION &&
+ return store_mode >= STORE_AND_GROW_NO_TRANSITION_HANDLE_COW &&
store_mode <= STORE_AND_GROW_TRANSITION_TO_DOUBLE;
}
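The predicates above classify a KeyedAccessStoreMode purely by comparing enumerator order, so the grouping only works because the grow/transition variants are declared contiguously. A small standalone sketch of that idea (the relative order below is inferred from the comparisons in the diff, so treat it as an assumption):

#include <iostream>

// Relative order assumed from the >=/<= comparisons above.
enum KeyedAccessStoreMode {
  STANDARD_STORE,
  STORE_TRANSITION_TO_OBJECT,
  STORE_TRANSITION_TO_DOUBLE,
  STORE_AND_GROW_NO_TRANSITION_HANDLE_COW,
  STORE_AND_GROW_TRANSITION_TO_OBJECT,
  STORE_AND_GROW_TRANSITION_TO_DOUBLE,
  STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS,
  STORE_NO_TRANSITION_HANDLE_COW
};

static bool IsGrowStoreMode(KeyedAccessStoreMode m) {
  return m >= STORE_AND_GROW_NO_TRANSITION_HANDLE_COW &&
         m <= STORE_AND_GROW_TRANSITION_TO_DOUBLE;
}

int main() {
  std::cout << IsGrowStoreMode(STORE_AND_GROW_TRANSITION_TO_OBJECT) << "\n";  // 1
  std::cout << IsGrowStoreMode(STANDARD_STORE) << "\n";                       // 0
}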
@@ -345,6 +356,8 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(FIXED_FLOAT32_ARRAY_TYPE) \
V(FIXED_FLOAT64_ARRAY_TYPE) \
V(FIXED_UINT8_CLAMPED_ARRAY_TYPE) \
+ V(FIXED_BIGINT64_ARRAY_TYPE) \
+ V(FIXED_BIGUINT64_ARRAY_TYPE) \
\
V(FIXED_DOUBLE_ARRAY_TYPE) \
V(FILLER_TYPE) \
@@ -363,21 +376,29 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(MODULE_INFO_ENTRY_TYPE) \
V(MODULE_TYPE) \
V(OBJECT_TEMPLATE_INFO_TYPE) \
- V(PROMISE_REACTION_JOB_INFO_TYPE) \
- V(PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE) \
+ V(PROMISE_CAPABILITY_TYPE) \
+ V(PROMISE_REACTION_TYPE) \
V(PROTOTYPE_INFO_TYPE) \
V(SCRIPT_TYPE) \
V(STACK_FRAME_INFO_TYPE) \
V(TUPLE2_TYPE) \
V(TUPLE3_TYPE) \
\
+ V(CALLABLE_TASK_TYPE) \
+ V(CALLBACK_TASK_TYPE) \
+ V(PROMISE_FULFILL_REACTION_JOB_TASK_TYPE) \
+ V(PROMISE_REJECT_REACTION_JOB_TASK_TYPE) \
+ V(PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE) \
+ \
V(FIXED_ARRAY_TYPE) \
V(DESCRIPTOR_ARRAY_TYPE) \
V(HASH_TABLE_TYPE) \
+ V(SCOPE_INFO_TYPE) \
V(TRANSITION_ARRAY_TYPE) \
\
V(CELL_TYPE) \
V(CODE_DATA_CONTAINER_TYPE) \
+ V(FEEDBACK_CELL_TYPE) \
V(FEEDBACK_VECTOR_TYPE) \
V(LOAD_HANDLER_TYPE) \
V(PROPERTY_ARRAY_TYPE) \
@@ -500,6 +521,8 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
V(JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
V(JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_BIGUINT64_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_BIGINT64_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
\
V(JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
V(JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
@@ -518,6 +541,8 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE) \
V(JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE) \
V(JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_BIGUINT64_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_BIGINT64_ARRAY_VALUE_ITERATOR_TYPE) \
\
V(JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE) \
V(JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE) \
@@ -551,15 +576,21 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(MODULE_INFO_ENTRY, ModuleInfoEntry, module_info_entry) \
V(MODULE, Module, module) \
V(OBJECT_TEMPLATE_INFO, ObjectTemplateInfo, object_template_info) \
- V(PROMISE_REACTION_JOB_INFO, PromiseReactionJobInfo, \
- promise_reaction_job_info) \
- V(PROMISE_RESOLVE_THENABLE_JOB_INFO, PromiseResolveThenableJobInfo, \
- promise_resolve_thenable_job_info) \
+ V(PROMISE_CAPABILITY, PromiseCapability, promise_capability) \
+ V(PROMISE_REACTION, PromiseReaction, promise_reaction) \
V(PROTOTYPE_INFO, PrototypeInfo, prototype_info) \
V(SCRIPT, Script, script) \
V(STACK_FRAME_INFO, StackFrameInfo, stack_frame_info) \
V(TUPLE2, Tuple2, tuple2) \
- V(TUPLE3, Tuple3, tuple3)
+ V(TUPLE3, Tuple3, tuple3) \
+ V(CALLABLE_TASK, CallableTask, callable_task) \
+ V(CALLBACK_TASK, CallbackTask, callback_task) \
+ V(PROMISE_FULFILL_REACTION_JOB_TASK, PromiseFulfillReactionJobTask, \
+ promise_fulfill_reaction_job_task) \
+ V(PROMISE_REJECT_REACTION_JOB_TASK, PromiseRejectReactionJobTask, \
+ promise_reject_reaction_job_task) \
+ V(PROMISE_RESOLVE_THENABLE_JOB_TASK, PromiseResolveThenableJobTask, \
+ promise_resolve_thenable_job_task)
#define DATA_HANDLER_LIST(V) \
V(LOAD_HANDLER, LoadHandler, 1, load_handler1) \
@@ -714,7 +745,9 @@ enum InstanceType : uint16_t {
FIXED_UINT32_ARRAY_TYPE,
FIXED_FLOAT32_ARRAY_TYPE,
FIXED_FLOAT64_ARRAY_TYPE,
- FIXED_UINT8_CLAMPED_ARRAY_TYPE, // LAST_FIXED_TYPED_ARRAY_TYPE
+ FIXED_UINT8_CLAMPED_ARRAY_TYPE,
+ FIXED_BIGINT64_ARRAY_TYPE,
+ FIXED_BIGUINT64_ARRAY_TYPE, // LAST_FIXED_TYPED_ARRAY_TYPE
FIXED_DOUBLE_ARRAY_TYPE,
FILLER_TYPE, // LAST_DATA_TYPE
@@ -733,23 +766,31 @@ enum InstanceType : uint16_t {
MODULE_INFO_ENTRY_TYPE,
MODULE_TYPE,
OBJECT_TEMPLATE_INFO_TYPE,
- PROMISE_REACTION_JOB_INFO_TYPE,
- PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE,
+ PROMISE_CAPABILITY_TYPE,
+ PROMISE_REACTION_TYPE,
PROTOTYPE_INFO_TYPE,
SCRIPT_TYPE,
STACK_FRAME_INFO_TYPE,
TUPLE2_TYPE,
TUPLE3_TYPE,
+ CALLABLE_TASK_TYPE, // FIRST_MICROTASK_TYPE
+ CALLBACK_TASK_TYPE,
+ PROMISE_FULFILL_REACTION_JOB_TASK_TYPE,
+ PROMISE_REJECT_REACTION_JOB_TASK_TYPE,
+ PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE, // LAST_MICROTASK_TYPE
+
// FixedArrays.
FIXED_ARRAY_TYPE, // FIRST_FIXED_ARRAY_TYPE
DESCRIPTOR_ARRAY_TYPE,
HASH_TABLE_TYPE,
+ SCOPE_INFO_TYPE,
TRANSITION_ARRAY_TYPE, // LAST_FIXED_ARRAY_TYPE
// Misc.
CELL_TYPE,
CODE_DATA_CONTAINER_TYPE,
+ FEEDBACK_CELL_TYPE,
FEEDBACK_VECTOR_TYPE,
LOAD_HANDLER_TYPE,
PROPERTY_ARRAY_TYPE,
@@ -830,9 +871,12 @@ enum InstanceType : uint16_t {
// Boundaries for testing if given HeapObject is a subclass of FixedArray.
FIRST_FIXED_ARRAY_TYPE = FIXED_ARRAY_TYPE,
LAST_FIXED_ARRAY_TYPE = TRANSITION_ARRAY_TYPE,
+ // Boundaries for testing if given HeapObject is a subclass of Microtask.
+ FIRST_MICROTASK_TYPE = CALLABLE_TASK_TYPE,
+ LAST_MICROTASK_TYPE = PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE,
// Boundaries for testing for a fixed typed array.
FIRST_FIXED_TYPED_ARRAY_TYPE = FIXED_INT8_ARRAY_TYPE,
- LAST_FIXED_TYPED_ARRAY_TYPE = FIXED_UINT8_CLAMPED_ARRAY_TYPE,
+ LAST_FIXED_TYPED_ARRAY_TYPE = FIXED_BIGUINT64_ARRAY_TYPE,
// Boundary for promotion to old space.
LAST_DATA_TYPE = FILLER_TYPE,
// Boundary for objects represented as JSReceiver (i.e. JSObject or JSProxy).
@@ -907,6 +951,7 @@ class FixedArrayBase;
class PropertyArray;
class FunctionLiteral;
class JSGlobalObject;
+class JSPromise;
class KeyAccumulator;
class LayoutDescriptor;
class LookupIterator;
@@ -921,12 +966,12 @@ class RootVisitor;
class SafepointEntry;
class SharedFunctionInfo;
class StringStream;
+class FeedbackCell;
class FeedbackMetadata;
class FeedbackVector;
class WeakCell;
class TransitionArray;
class TemplateList;
-class TemplateMap;
template <typename T>
class ZoneForwardList;
@@ -954,12 +999,13 @@ template <class C> inline bool Is(Object* obj);
V(BigInt) \
V(BoilerplateDescription) \
V(Boolean) \
+ V(BooleanWrapper) \
V(BreakPoint) \
V(BreakPointInfo) \
V(ByteArray) \
V(BytecodeArray) \
- V(Callable) \
V(CallHandlerInfo) \
+ V(Callable) \
V(Cell) \
V(ClassBoilerplate) \
V(Code) \
@@ -979,12 +1025,15 @@ template <class C> inline bool Is(Object* obj);
V(ExternalOneByteString) \
V(ExternalString) \
V(ExternalTwoByteString) \
+ V(FeedbackCell) \
V(FeedbackMetadata) \
V(FeedbackVector) \
V(Filler) \
V(FixedArray) \
V(FixedArrayBase) \
V(FixedArrayExact) \
+ V(FixedBigInt64Array) \
+ V(FixedBigUint64Array) \
V(FixedDoubleArray) \
V(FixedFloat32Array) \
V(FixedFloat64Array) \
@@ -1042,30 +1091,34 @@ template <class C> inline bool Is(Object* obj);
V(LoadHandler) \
V(Map) \
V(MapCache) \
+ V(Microtask) \
V(ModuleInfo) \
V(MutableHeapNumber) \
V(Name) \
V(NameDictionary) \
V(NativeContext) \
V(NormalizedMapCache) \
+ V(NumberDictionary) \
+ V(NumberWrapper) \
V(ObjectHashSet) \
V(ObjectHashTable) \
V(Oddball) \
V(OrderedHashMap) \
V(OrderedHashSet) \
V(PreParsedScopeData) \
- V(PromiseCapability) \
+ V(PromiseReactionJobTask) \
V(PropertyArray) \
V(PropertyCell) \
V(PropertyDescriptorObject) \
V(RegExpMatchInfo) \
V(ScopeInfo) \
V(ScriptContextTable) \
- V(NumberDictionary) \
+ V(ScriptWrapper) \
V(SeqOneByteString) \
V(SeqString) \
V(SeqTwoByteString) \
V(SharedFunctionInfo) \
+ V(SimpleNumberDictionary) \
V(SlicedString) \
V(SloppyArgumentsElements) \
V(SmallOrderedHashMap) \
@@ -1078,9 +1131,9 @@ template <class C> inline bool Is(Object* obj);
V(StringWrapper) \
V(Struct) \
V(Symbol) \
+ V(SymbolWrapper) \
V(TemplateInfo) \
V(TemplateList) \
- V(TemplateMap) \
V(TemplateObjectDescription) \
V(ThinString) \
V(TransitionArray) \
@@ -1210,8 +1263,6 @@ class Object {
// implementation of a JSObject's elements.
inline bool HasValidElements();
- inline bool HasSpecificClassOf(String* name);
-
bool BooleanValue(); // ECMA-262 9.2.
// ES6 section 7.2.11 Abstract Relational Comparison
@@ -1304,34 +1355,10 @@ class Object {
// ES6 section 12.5.6 The typeof Operator
static Handle<String> TypeOf(Isolate* isolate, Handle<Object> object);
- // ES6 section 12.6 Multiplicative Operators
- MUST_USE_RESULT static MaybeHandle<Object> Multiply(Isolate* isolate,
- Handle<Object> lhs,
- Handle<Object> rhs);
- MUST_USE_RESULT static MaybeHandle<Object> Divide(Isolate* isolate,
- Handle<Object> lhs,
- Handle<Object> rhs);
- MUST_USE_RESULT static MaybeHandle<Object> Modulus(Isolate* isolate,
- Handle<Object> lhs,
- Handle<Object> rhs);
-
// ES6 section 12.7 Additive Operators
MUST_USE_RESULT static MaybeHandle<Object> Add(Isolate* isolate,
Handle<Object> lhs,
Handle<Object> rhs);
- MUST_USE_RESULT static MaybeHandle<Object> Subtract(Isolate* isolate,
- Handle<Object> lhs,
- Handle<Object> rhs);
-
- // ES6 section 12.8 Bitwise Shift Operators
- MUST_USE_RESULT static MaybeHandle<Object> ShiftLeft(Isolate* isolate,
- Handle<Object> lhs,
- Handle<Object> rhs);
- MUST_USE_RESULT static MaybeHandle<Object> ShiftRight(Isolate* isolate,
- Handle<Object> lhs,
- Handle<Object> rhs);
- MUST_USE_RESULT static MaybeHandle<Object> ShiftRightLogical(
- Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs);
// ES6 section 12.9 Relational Operators
MUST_USE_RESULT static inline Maybe<bool> GreaterThan(Handle<Object> x,
@@ -1343,17 +1370,6 @@ class Object {
MUST_USE_RESULT static inline Maybe<bool> LessThanOrEqual(Handle<Object> x,
Handle<Object> y);
- // ES6 section 12.11 Binary Bitwise Operators
- MUST_USE_RESULT static MaybeHandle<Object> BitwiseAnd(Isolate* isolate,
- Handle<Object> lhs,
- Handle<Object> rhs);
- MUST_USE_RESULT static MaybeHandle<Object> BitwiseOr(Isolate* isolate,
- Handle<Object> lhs,
- Handle<Object> rhs);
- MUST_USE_RESULT static MaybeHandle<Object> BitwiseXor(Isolate* isolate,
- Handle<Object> lhs,
- Handle<Object> rhs);
-
// ES6 section 7.3.19 OrdinaryHasInstance (C, O).
MUST_USE_RESULT static MaybeHandle<Object> OrdinaryHasInstance(
Isolate* isolate, Handle<Object> callable, Handle<Object> object);
@@ -1800,7 +1816,7 @@ class HeapObject: public Object {
static void VerifyHeapPointer(Object* p);
#endif
- inline AllocationAlignment RequiredAlignment() const;
+ static inline AllocationAlignment RequiredAlignment(Map* map);
// Whether the object needs rehashing. That is the case if the object's
// content depends on FLAG_hash_seed. When the object is deserialized into
@@ -1900,7 +1916,10 @@ enum AccessorComponent {
ACCESSOR_SETTER
};
-enum class GetKeysConversion { kKeepNumbers, kConvertToString };
+enum class GetKeysConversion {
+ kKeepNumbers = static_cast<int>(v8::KeyConversionMode::kKeepNumbers),
+ kConvertToString = static_cast<int>(v8::KeyConversionMode::kConvertToString)
+};
enum class KeyCollectionMode {
kOwnOnly = static_cast<int>(v8::KeyCollectionMode::kOwnOnly),
@@ -2182,10 +2201,12 @@ class JSReceiver: public HeapObject {
Handle<JSReceiver> object);
MUST_USE_RESULT static MaybeHandle<FixedArray> GetOwnValues(
- Handle<JSReceiver> object, PropertyFilter filter);
+ Handle<JSReceiver> object, PropertyFilter filter,
+ bool try_fast_path = true);
MUST_USE_RESULT static MaybeHandle<FixedArray> GetOwnEntries(
- Handle<JSReceiver> object, PropertyFilter filter);
+ Handle<JSReceiver> object, PropertyFilter filter,
+ bool try_fast_path = true);
static const int kHashMask = PropertyArray::HashField::kMask;
@@ -2273,6 +2294,8 @@ class JSObject: public JSReceiver {
inline bool HasFixedUint32Elements();
inline bool HasFixedFloat32Elements();
inline bool HasFixedFloat64Elements();
+ inline bool HasFixedBigInt64Elements();
+ inline bool HasFixedBigUint64Elements();
inline bool HasFastArgumentsElements();
inline bool HasSlowArgumentsElements();
@@ -2872,86 +2895,6 @@ class Tuple3 : public Tuple2 {
DISALLOW_IMPLICIT_CONSTRUCTORS(Tuple3);
};
-class PromiseCapability : public Tuple3 {
- public:
- DECL_CAST(PromiseCapability)
- DECL_PRINTER(PromiseCapability)
- DECL_VERIFIER(PromiseCapability)
-
- DECL_ACCESSORS(promise, Object)
- DECL_ACCESSORS(resolve, Object)
- DECL_ACCESSORS(reject, Object)
-
- static const int kPromiseOffset = Tuple3::kValue1Offset;
- static const int kResolveOffset = Tuple3::kValue2Offset;
- static const int kRejectOffset = Tuple3::kValue3Offset;
- static const int kSize = Tuple3::kSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseCapability);
-};
-
-// A container struct to hold state required for PromiseResolveThenableJob.
-class PromiseResolveThenableJobInfo : public Struct {
- public:
- DECL_ACCESSORS(thenable, JSReceiver)
- DECL_ACCESSORS(then, JSReceiver)
- DECL_ACCESSORS(resolve, JSFunction)
- DECL_ACCESSORS(reject, JSFunction)
-
- DECL_ACCESSORS(context, Context)
-
- static const int kThenableOffset = Struct::kHeaderSize;
- static const int kThenOffset = kThenableOffset + kPointerSize;
- static const int kResolveOffset = kThenOffset + kPointerSize;
- static const int kRejectOffset = kResolveOffset + kPointerSize;
- static const int kContextOffset = kRejectOffset + kPointerSize;
- static const int kSize = kContextOffset + kPointerSize;
-
- DECL_CAST(PromiseResolveThenableJobInfo)
- DECL_PRINTER(PromiseResolveThenableJobInfo)
- DECL_VERIFIER(PromiseResolveThenableJobInfo)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseResolveThenableJobInfo);
-};
-
-class JSPromise;
-
-// Struct to hold state required for PromiseReactionJob.
-class PromiseReactionJobInfo : public Struct {
- public:
- DECL_ACCESSORS(value, Object)
- DECL_ACCESSORS(tasks, Object)
-
- // Check comment in JSPromise for information on what state these
- // deferred fields could be in.
- DECL_ACCESSORS(deferred_promise, Object)
- DECL_ACCESSORS(deferred_on_resolve, Object)
- DECL_ACCESSORS(deferred_on_reject, Object)
-
- DECL_INT_ACCESSORS(debug_id)
-
- DECL_ACCESSORS(context, Context)
-
- static const int kValueOffset = Struct::kHeaderSize;
- static const int kTasksOffset = kValueOffset + kPointerSize;
- static const int kDeferredPromiseOffset = kTasksOffset + kPointerSize;
- static const int kDeferredOnResolveOffset =
- kDeferredPromiseOffset + kPointerSize;
- static const int kDeferredOnRejectOffset =
- kDeferredOnResolveOffset + kPointerSize;
- static const int kContextOffset = kDeferredOnRejectOffset + kPointerSize;
- static const int kSize = kContextOffset + kPointerSize;
-
- DECL_CAST(PromiseReactionJobInfo)
- DECL_PRINTER(PromiseReactionJobInfo)
- DECL_VERIFIER(PromiseReactionJobInfo)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseReactionJobInfo);
-};
-
class AsyncGeneratorRequest : public Struct {
public:
// Holds an AsyncGeneratorRequest, or Undefined.
@@ -3128,8 +3071,8 @@ class ContextExtension : public Struct {
V(String.prototype, toString, StringToString) \
V(String.prototype, toUpperCase, StringToUpperCase) \
V(String.prototype, trim, StringTrim) \
- V(String.prototype, trimLeft, StringTrimLeft) \
- V(String.prototype, trimRight, StringTrimRight) \
+ V(String.prototype, trimLeft, StringTrimStart) \
+ V(String.prototype, trimRight, StringTrimEnd) \
V(String.prototype, valueOf, StringValueOf) \
V(String, fromCharCode, StringFromCharCode) \
V(String, fromCodePoint, StringFromCodePoint) \
@@ -3335,14 +3278,14 @@ class JSAsyncGeneratorObject : public JSGeneratorObject {
// undefined.
DECL_ACCESSORS(queue, HeapObject)
- // [awaited_promise]
- // A reference to the Promise of an AwaitExpression.
- DECL_ACCESSORS(awaited_promise, HeapObject)
+ // [is_awaiting]
+ // Whether or not the generator is currently awaiting.
+ DECL_INT_ACCESSORS(is_awaiting)
// Layout description.
static const int kQueueOffset = JSGeneratorObject::kSize;
- static const int kAwaitedPromiseOffset = kQueueOffset + kPointerSize;
- static const int kSize = kAwaitedPromiseOffset + kPointerSize;
+ static const int kIsAwaitingOffset = kQueueOffset + kPointerSize;
+ static const int kSize = kIsAwaitingOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSAsyncGeneratorObject);
@@ -3477,22 +3420,14 @@ class JSFunction: public JSObject {
// Completes inobject slack tracking on initial map if it is active.
inline void CompleteInobjectSlackTrackingIfActive();
- // [feedback_vector_cell]: The feedback vector.
- DECL_ACCESSORS(feedback_vector_cell, Cell)
-
- enum FeedbackVectorState {
- TOP_LEVEL_SCRIPT_NEEDS_VECTOR,
- NEEDS_VECTOR,
- HAS_VECTOR,
- NO_VECTOR_NEEDED
- };
-
- inline FeedbackVectorState GetFeedbackVectorState(Isolate* isolate) const;
+ // [feedback_cell]: The FeedbackCell used to hold the FeedbackVector
+ // eventually.
+ DECL_ACCESSORS(feedback_cell, FeedbackCell)
// feedback_vector() can be used once the function is compiled.
inline FeedbackVector* feedback_vector() const;
inline bool has_feedback_vector() const;
- static void EnsureLiterals(Handle<JSFunction> function);
+ static void EnsureFeedbackVector(Handle<JSFunction> function);
// Unconditionally clear the type feedback vector.
void ClearTypeFeedbackInfo();
@@ -3575,7 +3510,7 @@ class JSFunction: public JSObject {
/* Pointer fields. */ \
V(kSharedFunctionInfoOffset, kPointerSize) \
V(kContextOffset, kPointerSize) \
- V(kFeedbackVectorOffset, kPointerSize) \
+ V(kFeedbackCellOffset, kPointerSize) \
V(kEndOfStrongFieldsOffset, 0) \
V(kCodeOffset, kPointerSize) \
/* Size of JSFunction object without prototype field. */ \
@@ -3851,76 +3786,6 @@ class JSMessageObject: public JSObject {
typedef BodyDescriptor BodyDescriptorWeak;
};
-class JSPromise : public JSObject {
- public:
- DECL_ACCESSORS(result, Object)
-
- // There are 3 possible states for these fields --
- // 1) Undefined -- This is the zero state when there is no callback
- // or deferred fields registered.
- //
- // 2) Object -- There is a single callback directly attached to the
- // fulfill_reactions, reject_reactions and the deferred fields are
- // directly attached to the slots. In this state, deferred_promise
- // is a JSReceiver and deferred_on_{resolve, reject} are Callables.
- //
- // 3) FixedArray -- There is more than one callback and deferred
- // fields attached to a FixedArray.
- //
- // The callback can be a Callable or a Symbol.
- DECL_ACCESSORS(deferred_promise, Object)
- DECL_ACCESSORS(deferred_on_resolve, Object)
- DECL_ACCESSORS(deferred_on_reject, Object)
- DECL_ACCESSORS(fulfill_reactions, Object)
- DECL_ACCESSORS(reject_reactions, Object)
-
- DECL_INT_ACCESSORS(flags)
-
- // [has_handler]: Whether this promise has a reject handler or not.
- DECL_BOOLEAN_ACCESSORS(has_handler)
-
- // [handled_hint]: Whether this promise will be handled by a catch
- // block in an async function.
- DECL_BOOLEAN_ACCESSORS(handled_hint)
-
- static const char* Status(v8::Promise::PromiseState status);
- v8::Promise::PromiseState status() const;
-
- DECL_CAST(JSPromise)
-
- // Dispatched behavior.
- DECL_PRINTER(JSPromise)
- DECL_VERIFIER(JSPromise)
-
- // Layout description.
- static const int kResultOffset = JSObject::kHeaderSize;
- static const int kDeferredPromiseOffset = kResultOffset + kPointerSize;
- static const int kDeferredOnResolveOffset =
- kDeferredPromiseOffset + kPointerSize;
- static const int kDeferredOnRejectOffset =
- kDeferredOnResolveOffset + kPointerSize;
- static const int kFulfillReactionsOffset =
- kDeferredOnRejectOffset + kPointerSize;
- static const int kRejectReactionsOffset =
- kFulfillReactionsOffset + kPointerSize;
- static const int kFlagsOffset = kRejectReactionsOffset + kPointerSize;
- static const int kSize = kFlagsOffset + kPointerSize;
- static const int kSizeWithEmbedderFields =
- kSize + v8::Promise::kEmbedderFieldCount * kPointerSize;
-
- // Flags layout.
- // The first two bits store the v8::Promise::PromiseState.
- static const int kStatusBits = 2;
- static const int kHasHandlerBit = 2;
- static const int kHandledHintBit = 3;
-
- static const int kStatusShift = 0;
- static const int kStatusMask = 0x3;
- STATIC_ASSERT(v8::Promise::kPending == 0);
- STATIC_ASSERT(v8::Promise::kFulfilled == 1);
- STATIC_ASSERT(v8::Promise::kRejected == 2);
-};
-
class AllocationSite: public Struct {
public:
static const uint32_t kMaximumArrayBytesToPretransition = 8 * 1024;
@@ -4217,6 +4082,33 @@ class Cell: public HeapObject {
DISALLOW_IMPLICIT_CONSTRUCTORS(Cell);
};
+// This is a special cell used to maintain both the link between a
+// closure and its feedback vector, and a way to count the
+// number of closures created for a certain function per native
+// context. There's at most one FeedbackCell for each function in
+// a native context.
+class FeedbackCell : public Struct {
+ public:
+ // [value]: value of the cell.
+ DECL_ACCESSORS(value, HeapObject)
+
+ DECL_CAST(FeedbackCell)
+
+ // Dispatched behavior.
+ DECL_PRINTER(FeedbackCell)
+ DECL_VERIFIER(FeedbackCell)
+
+ static const int kValueOffset = HeapObject::kHeaderSize;
+ static const int kSize = kValueOffset + kPointerSize;
+
+ typedef FixedBodyDescriptor<kValueOffset, kValueOffset + kPointerSize, kSize>
+ BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FeedbackCell);
+};
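As the comment above says, a FeedbackCell is a single-slot indirection shared by every closure created from the same function in a native context, so all of those closures observe the same FeedbackVector. A minimal illustration of that sharing (plain C++ stand-ins, not the V8 types):

#include <iostream>
#include <memory>

// Illustrative stand-ins: one cell per function/native-context pair,
// many closures pointing at the same cell.
struct FeedbackVector { int invocation_count = 0; };
struct FeedbackCell { std::shared_ptr<FeedbackVector> value; };
struct Closure { std::shared_ptr<FeedbackCell> feedback_cell; };

int main() {
  auto cell = std::make_shared<FeedbackCell>();
  cell->value = std::make_shared<FeedbackVector>();

  Closure a{cell}, b{cell};  // two closures created from the same function
  a.feedback_cell->value->invocation_count++;
  // b sees the update because both closures share one FeedbackCell.
  std::cout << b.feedback_cell->value->invocation_count << "\n";  // prints 1
}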
class PropertyCell : public HeapObject {
public:
@@ -4415,10 +4307,10 @@ class JSProxy: public JSReceiver {
// No weak fields.
typedef BodyDescriptor BodyDescriptorWeak;
- static Maybe<bool> SetPrivateProperty(Isolate* isolate, Handle<JSProxy> proxy,
- Handle<Symbol> private_name,
- PropertyDescriptor* desc,
- ShouldThrow should_throw);
+ static Maybe<bool> SetPrivateSymbol(Isolate* isolate, Handle<JSProxy> proxy,
+ Handle<Symbol> private_name,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxy);
@@ -4459,9 +4351,14 @@ class JSAsyncFromSyncIterator : public JSObject {
// (proposal-async-iteration/#table-async-from-sync-iterator-internal-slots)
DECL_ACCESSORS(sync_iterator, JSReceiver)
+ // The "next" method is loaded during GetIterator, and is not reloaded for
+ // subsequent "next" invocations.
+ DECL_ACCESSORS(next, Object)
+
// Offsets of object fields.
static const int kSyncIteratorOffset = JSObject::kHeaderSize;
- static const int kSize = kSyncIteratorOffset + kPointerSize;
+ static const int kNextOffset = kSyncIteratorOffset + kPointerSize;
+ static const int kSize = kNextOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSAsyncFromSyncIterator);
@@ -5002,7 +4899,7 @@ class StackFrameInfo : public Struct {
class SourcePositionTableWithFrameCache : public Tuple2 {
public:
DECL_ACCESSORS(source_position_table, ByteArray)
- DECL_ACCESSORS(stack_frame_cache, NumberDictionary)
+ DECL_ACCESSORS(stack_frame_cache, SimpleNumberDictionary)
DECL_CAST(SourcePositionTableWithFrameCache)
diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc
index df5f854395..c107ab8cd1 100644
--- a/deps/v8/src/objects/bigint.cc
+++ b/deps/v8/src/objects/bigint.cc
@@ -40,7 +40,8 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
static Handle<BigInt> MakeImmutable(Handle<MutableBigInt> result);
// Allocation helpers.
- static MaybeHandle<MutableBigInt> New(Isolate* isolate, int length);
+ static MaybeHandle<MutableBigInt> New(Isolate* isolate, int length,
+ PretenureFlag pretenure = NOT_TENURED);
static Handle<BigInt> NewFromInt(Isolate* isolate, int value);
static Handle<BigInt> NewFromSafeInteger(Isolate* isolate, double value);
void InitializeDigits(int length, byte value = 0);
@@ -145,6 +146,10 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
static Rounding DecideRounding(Handle<BigIntBase> x, int mantissa_bits_unset,
int digit_index, uint64_t current_digit);
+ // Returns the least significant 64 bits, simulating two's complement
+ // representation.
+ static uint64_t GetRawBits(BigIntBase* x, bool* lossless);
+
// Digit arithmetic helpers.
static inline digit_t digit_add(digit_t a, digit_t b, digit_t* carry);
static inline digit_t digit_sub(digit_t a, digit_t b, digit_t* borrow);
@@ -174,14 +179,18 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
(*reinterpret_cast<digit_t*>(reinterpret_cast<intptr_t>(address))) = value;
}
#include "src/objects/object-macros-undef.h"
+
+ void set_64_bits(uint64_t bits);
};
-MaybeHandle<MutableBigInt> MutableBigInt::New(Isolate* isolate, int length) {
+MaybeHandle<MutableBigInt> MutableBigInt::New(Isolate* isolate, int length,
+ PretenureFlag pretenure) {
if (length > BigInt::kMaxLength) {
THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
MutableBigInt);
}
- Handle<MutableBigInt> result = Cast(isolate->factory()->NewBigInt(length));
+ Handle<MutableBigInt> result =
+ Cast(isolate->factory()->NewBigInt(length, pretenure));
result->set_length(length);
result->set_sign(false);
#if DEBUG
@@ -218,13 +227,7 @@ Handle<BigInt> MutableBigInt::NewFromSafeInteger(Isolate* isolate,
Handle<MutableBigInt> result = Cast(isolate->factory()->NewBigInt(length));
result->set_length(length);
result->set_sign(value < 0); // Treats -0 like 0.
- if (kDigitBits == 64) {
- result->set_digit(0, absolute);
- } else {
- DCHECK_EQ(kDigitBits, 32);
- result->set_digit(0, absolute);
- result->set_digit(1, absolute >> 32);
- }
+ result->set_64_bits(absolute);
return MakeImmutable(result);
}
@@ -1702,7 +1705,8 @@ static const int kBitsPerCharTableShift = 5;
static const size_t kBitsPerCharTableMultiplier = 1u << kBitsPerCharTableShift;
MaybeHandle<FreshlyAllocatedBigInt> BigInt::AllocateFor(
- Isolate* isolate, int radix, int charcount, ShouldThrow should_throw) {
+ Isolate* isolate, int radix, int charcount, ShouldThrow should_throw,
+ PretenureFlag pretenure) {
DCHECK(2 <= radix && radix <= 36);
DCHECK_GE(charcount, 0);
size_t bits_per_char = kMaxBitsPerChar[radix];
@@ -1717,7 +1721,7 @@ MaybeHandle<FreshlyAllocatedBigInt> BigInt::AllocateFor(
int length = (static_cast<int>(bits_min) + kDigitBits - 1) / kDigitBits;
if (length <= kMaxLength) {
Handle<MutableBigInt> result =
- MutableBigInt::New(isolate, length).ToHandleChecked();
+ MutableBigInt::New(isolate, length, pretenure).ToHandleChecked();
result->InitializeDigits(length);
return result;
}
@@ -2079,6 +2083,68 @@ Handle<BigInt> MutableBigInt::TruncateAndSubFromPowerOfTwo(int n,
return MakeImmutable(result);
}
+Handle<BigInt> BigInt::FromInt64(Isolate* isolate, int64_t n) {
+ if (n == 0) return MutableBigInt::Zero(isolate);
+ STATIC_ASSERT(kDigitBits == 64 || kDigitBits == 32);
+ int length = 64 / kDigitBits;
+ Handle<MutableBigInt> result =
+ MutableBigInt::Cast(isolate->factory()->NewBigInt(length));
+ result->set_length(length);
+ uint64_t absolute;
+ if (n > 0) {
+ result->set_sign(false);
+ absolute = static_cast<uint64_t>(n);
+ } else {
+ result->set_sign(true);
+ if (n == std::numeric_limits<int64_t>::min()) {
+ absolute = static_cast<uint64_t>(std::numeric_limits<int64_t>::max()) + 1;
+ } else {
+ absolute = static_cast<uint64_t>(-n);
+ }
+ }
+ result->set_64_bits(absolute);
+ return MutableBigInt::MakeImmutable(result);
+}
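The special case for std::numeric_limits<int64_t>::min() above exists because negating INT64_MIN overflows; its magnitude has to be built as INT64_MAX + 1 in unsigned arithmetic. A standalone check of the same computation:

#include <cstdint>
#include <iostream>
#include <limits>

// Magnitude of an int64_t without overflowing on INT64_MIN, mirroring the
// branch in BigInt::FromInt64 above.
uint64_t Magnitude(int64_t n) {
  if (n >= 0) return static_cast<uint64_t>(n);
  if (n == std::numeric_limits<int64_t>::min()) {
    return static_cast<uint64_t>(std::numeric_limits<int64_t>::max()) + 1;
  }
  return static_cast<uint64_t>(-n);
}

int main() {
  std::cout << Magnitude(-5) << "\n";                                   // 5
  std::cout << Magnitude(std::numeric_limits<int64_t>::min()) << "\n";  // 9223372036854775808
}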
+
+Handle<BigInt> BigInt::FromUint64(Isolate* isolate, uint64_t n) {
+ if (n == 0) return MutableBigInt::Zero(isolate);
+ STATIC_ASSERT(kDigitBits == 64 || kDigitBits == 32);
+ int length = 64 / kDigitBits;
+ Handle<MutableBigInt> result =
+ MutableBigInt::Cast(isolate->factory()->NewBigInt(length));
+ result->set_length(length);
+ result->set_sign(false);
+ result->set_64_bits(n);
+ return MutableBigInt::MakeImmutable(result);
+}
+
+uint64_t MutableBigInt::GetRawBits(BigIntBase* x, bool* lossless) {
+ if (lossless != nullptr) *lossless = true;
+ if (x->is_zero()) return 0;
+ int len = x->length();
+ STATIC_ASSERT(kDigitBits == 64 || kDigitBits == 32);
+ if (lossless != nullptr && len > 64 / kDigitBits) *lossless = false;
+ uint64_t raw = static_cast<uint64_t>(x->digit(0));
+ if (kDigitBits == 32 && len > 1) {
+ raw |= static_cast<uint64_t>(x->digit(1)) << 32;
+ }
+ // Simulate two's complement. MSVC dislikes "-raw".
+ return x->sign() ? ((~raw) + 1u) : raw;
+}
+
+int64_t BigInt::AsInt64(bool* lossless) {
+ uint64_t raw = MutableBigInt::GetRawBits(this, lossless);
+ int64_t result = static_cast<int64_t>(raw);
+ if (lossless != nullptr && (result < 0) != sign()) *lossless = false;
+ return result;
+}
+
+uint64_t BigInt::AsUint64(bool* lossless) {
+ uint64_t result = MutableBigInt::GetRawBits(this, lossless);
+ if (lossless != nullptr && sign()) *lossless = false;
+ return result;
+}
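GetRawBits rebuilds the low 64 bits in two's complement from the sign and the magnitude digits, and AsInt64/AsUint64 then report a lossy conversion when the reconstructed value cannot carry the original sign or width. A self-contained sketch of the same arithmetic on plain integers:

#include <cstdint>
#include <iostream>

// Two's-complement low 64 bits from (sign, magnitude), as in GetRawBits above.
uint64_t RawBits(bool negative, uint64_t magnitude) {
  return negative ? (~magnitude) + 1u : magnitude;
}

int main() {
  // -1 has magnitude 1: the raw bits are all ones.
  std::cout << std::hex << RawBits(true, 1) << "\n";  // ffffffffffffffff

  // A positive magnitude of 2^63 no longer fits in int64_t: the interpreted
  // sign flips, which is the condition AsInt64 uses to clear *lossless.
  int64_t as_signed = static_cast<int64_t>(RawBits(false, 1ull << 63));
  std::cout << std::dec << (as_signed < 0) << "\n";  // 1, i.e. lossy
}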
+
// Digit arithmetic helpers.
#if V8_TARGET_ARCH_32_BIT
@@ -2240,20 +2306,30 @@ BigInt::digit_t MutableBigInt::digit_pow(digit_t base, digit_t exponent) {
#undef HAVE_TWODIGIT_T
+void MutableBigInt::set_64_bits(uint64_t bits) {
+ STATIC_ASSERT(kDigitBits == 64 || kDigitBits == 32);
+ if (kDigitBits == 64) {
+ set_digit(0, static_cast<digit_t>(bits));
+ } else {
+ set_digit(0, static_cast<digit_t>(bits & 0xFFFFFFFFu));
+ set_digit(1, static_cast<digit_t>(bits >> 32));
+ }
+}
+
#ifdef OBJECT_PRINT
void BigInt::BigIntPrint(std::ostream& os) {
DisallowHeapAllocation no_gc;
HeapObject::PrintHeader(os, "BigInt");
int len = length();
- os << "- length: " << len << "\n";
- os << "- sign: " << sign() << "\n";
+ os << "\n- length: " << len;
+ os << "\n- sign: " << sign();
if (len > 0) {
- os << "- digits:";
+ os << "\n- digits:";
for (int i = 0; i < len; i++) {
os << "\n 0x" << std::hex << digit(i);
}
- os << std::dec << "\n";
}
+ os << std::dec << "\n";
}
#endif // OBJECT_PRINT
diff --git a/deps/v8/src/objects/bigint.h b/deps/v8/src/objects/bigint.h
index 9e29a69b3b..7409f0bade 100644
--- a/deps/v8/src/objects/bigint.h
+++ b/deps/v8/src/objects/bigint.h
@@ -137,6 +137,11 @@ class V8_EXPORT_PRIVATE BigInt : public BigIntBase {
static Handle<BigInt> AsIntN(uint64_t n, Handle<BigInt> x);
static MaybeHandle<BigInt> AsUintN(uint64_t n, Handle<BigInt> x);
+ static Handle<BigInt> FromInt64(Isolate* isolate, int64_t n);
+ static Handle<BigInt> FromUint64(Isolate* isolate, uint64_t n);
+ int64_t AsInt64(bool* lossless = nullptr);
+ uint64_t AsUint64(bool* lossless = nullptr);
+
DECL_CAST(BigInt)
DECL_VERIFIER(BigInt)
DECL_PRINTER(BigInt)
@@ -162,12 +167,13 @@ class V8_EXPORT_PRIVATE BigInt : public BigIntBase {
class BodyDescriptor;
private:
- friend class BigIntParseIntHelper;
+ friend class StringToBigIntHelper;
- // Special functions for BigIntParseIntHelper:
+ // Special functions for StringToBigIntHelper:
static Handle<BigInt> Zero(Isolate* isolate);
static MaybeHandle<FreshlyAllocatedBigInt> AllocateFor(
- Isolate* isolate, int radix, int charcount, ShouldThrow should_throw);
+ Isolate* isolate, int radix, int charcount, ShouldThrow should_throw,
+ PretenureFlag pretenure);
static void InplaceMultiplyAdd(Handle<FreshlyAllocatedBigInt> x,
uintptr_t factor, uintptr_t summand);
static Handle<BigInt> Finalize(Handle<FreshlyAllocatedBigInt> x, bool sign);
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
index 4c3e7f0d97..8b14034f26 100644
--- a/deps/v8/src/objects/code-inl.h
+++ b/deps/v8/src/objects/code-inl.h
@@ -26,7 +26,6 @@ CAST_ACCESSOR(Code)
CAST_ACCESSOR(CodeDataContainer)
CAST_ACCESSOR(DependentCode)
CAST_ACCESSOR(DeoptimizationData)
-CAST_ACCESSOR(HandlerTable)
int AbstractCode::instruction_size() {
if (IsCode()) {
@@ -149,12 +148,12 @@ void DependentCode::copy(int from, int to) {
}
INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
+INT_ACCESSORS(Code, handler_table_offset, kHandlerTableOffsetOffset)
INT_ACCESSORS(Code, constant_pool_offset, kConstantPoolOffset)
#define CODE_ACCESSORS(name, type, offset) \
ACCESSORS_CHECKED2(Code, name, type, offset, true, \
!GetHeap()->InNewSpace(value))
CODE_ACCESSORS(relocation_info, ByteArray, kRelocationInfoOffset)
-CODE_ACCESSORS(handler_table, FixedArray, kHandlerTableOffset)
CODE_ACCESSORS(deoptimization_data, FixedArray, kDeoptimizationDataOffset)
CODE_ACCESSORS(source_position_table, Object, kSourcePositionTableOffset)
CODE_ACCESSORS(protected_instructions, FixedArray, kProtectedInstructionsOffset)
@@ -164,7 +163,6 @@ CODE_ACCESSORS(trap_handler_index, Smi, kTrapHandlerIndex)
void Code::WipeOutHeader() {
WRITE_FIELD(this, kRelocationInfoOffset, nullptr);
- WRITE_FIELD(this, kHandlerTableOffset, nullptr);
WRITE_FIELD(this, kDeoptimizationDataOffset, nullptr);
WRITE_FIELD(this, kSourcePositionTableOffset, nullptr);
WRITE_FIELD(this, kProtectedInstructionsOffset, nullptr);
@@ -204,14 +202,35 @@ void Code::set_next_code_link(Object* value) {
code_data_container()->set_next_code_link(value);
}
+int Code::InstructionSize() {
+#ifdef V8_EMBEDDED_BUILTINS
+ if (Builtins::IsOffHeapBuiltin(this)) return OffHeapInstructionSize();
+#endif
+ return instruction_size();
+}
+
byte* Code::instruction_start() const {
return const_cast<byte*>(FIELD_ADDR_CONST(this, kHeaderSize));
}
+Address Code::InstructionStart() {
+#ifdef V8_EMBEDDED_BUILTINS
+ if (Builtins::IsOffHeapBuiltin(this)) return OffHeapInstructionStart();
+#endif
+ return instruction_start();
+}
+
byte* Code::instruction_end() const {
return instruction_start() + instruction_size();
}
+Address Code::InstructionEnd() {
+#ifdef V8_EMBEDDED_BUILTINS
+ if (Builtins::IsOffHeapBuiltin(this)) return OffHeapInstructionEnd();
+#endif
+ return instruction_end();
+}
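Each of the accessors above follows the same dispatch shape: off-heap builtins report their real off-heap instruction stream, everything else falls back to the on-heap field. A tiny sketch of that pattern (illustrative names, not the V8 classes):

#include <cstdint>
#include <iostream>

// Dispatch shape used by Code::InstructionStart()/InstructionSize()/
// InstructionEnd() above.
struct CodeLike {
  bool is_off_heap_builtin;
  std::uintptr_t on_heap_start;
  std::uintptr_t off_heap_start;

  std::uintptr_t InstructionStart() const {
    return is_off_heap_builtin ? off_heap_start : on_heap_start;
  }
};

int main() {
  CodeLike trampoline{true, 0x1000, 0x20000};
  CodeLike regular{false, 0x3000, 0};
  std::cout << std::hex << trampoline.InstructionStart() << "\n";  // 20000
  std::cout << std::hex << regular.InstructionStart() << "\n";     // 3000
}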
+
int Code::GetUnwindingInfoSizeOffset() const {
DCHECK(has_unwinding_info());
return RoundUp(kHeaderSize + instruction_size(), kInt64Size);
@@ -252,7 +271,6 @@ int Code::SizeIncludingMetadata() const {
int size = CodeSize();
size += relocation_info()->Size();
size += deoptimization_data()->Size();
- size += handler_table()->Size();
size += protected_instructions()->Size();
return size;
}
@@ -618,7 +636,7 @@ int BytecodeArray::parameter_count() const {
}
ACCESSORS(BytecodeArray, constant_pool, FixedArray, kConstantPoolOffset)
-ACCESSORS(BytecodeArray, handler_table, FixedArray, kHandlerTableOffset)
+ACCESSORS(BytecodeArray, handler_table, ByteArray, kHandlerTableOffset)
ACCESSORS(BytecodeArray, source_position_table, Object,
kSourcePositionTableOffset)
@@ -657,55 +675,6 @@ int BytecodeArray::SizeIncludingMetadata() {
return size;
}
-int HandlerTable::GetRangeStart(int index) const {
- return Smi::ToInt(get(index * kRangeEntrySize + kRangeStartIndex));
-}
-
-int HandlerTable::GetRangeEnd(int index) const {
- return Smi::ToInt(get(index * kRangeEntrySize + kRangeEndIndex));
-}
-
-int HandlerTable::GetRangeHandler(int index) const {
- return HandlerOffsetField::decode(
- Smi::ToInt(get(index * kRangeEntrySize + kRangeHandlerIndex)));
-}
-
-int HandlerTable::GetRangeData(int index) const {
- return Smi::ToInt(get(index * kRangeEntrySize + kRangeDataIndex));
-}
-
-void HandlerTable::SetRangeStart(int index, int value) {
- set(index * kRangeEntrySize + kRangeStartIndex, Smi::FromInt(value));
-}
-
-void HandlerTable::SetRangeEnd(int index, int value) {
- set(index * kRangeEntrySize + kRangeEndIndex, Smi::FromInt(value));
-}
-
-void HandlerTable::SetRangeHandler(int index, int offset,
- CatchPrediction prediction) {
- int value = HandlerOffsetField::encode(offset) |
- HandlerPredictionField::encode(prediction);
- set(index * kRangeEntrySize + kRangeHandlerIndex, Smi::FromInt(value));
-}
-
-void HandlerTable::SetRangeData(int index, int value) {
- set(index * kRangeEntrySize + kRangeDataIndex, Smi::FromInt(value));
-}
-
-void HandlerTable::SetReturnOffset(int index, int value) {
- set(index * kReturnEntrySize + kReturnOffsetIndex, Smi::FromInt(value));
-}
-
-void HandlerTable::SetReturnHandler(int index, int offset) {
- int value = HandlerOffsetField::encode(offset);
- set(index * kReturnEntrySize + kReturnHandlerIndex, Smi::FromInt(value));
-}
-
-int HandlerTable::NumberOfRangeEntries() const {
- return length() / kRangeEntrySize;
-}
-
BailoutId DeoptimizationData::BytecodeOffset(int i) {
return BailoutId(BytecodeOffsetRaw(i)->value());
}
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index c43e07c1f9..19e1002f77 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_CODE_H_
#define V8_OBJECTS_CODE_H_
+#include "src/handler-table.h"
#include "src/objects.h"
#include "src/objects/fixed-array.h"
@@ -18,92 +19,6 @@ class ByteArray;
class BytecodeArray;
class CodeDataContainer;
-// HandlerTable is a fixed array containing entries for exception handlers in
-// the code object it is associated with. The tables comes in two flavors:
-// 1) Based on ranges: Used for unoptimized code. Contains one entry per
-// exception handler and a range representing the try-block covered by that
-// handler. Layout looks as follows:
-// [ range-start , range-end , handler-offset , handler-data ]
-// 2) Based on return addresses: Used for turbofanned code. Contains one entry
-// per call-site that could throw an exception. Layout looks as follows:
-// [ return-address-offset , handler-offset ]
-class HandlerTable : public FixedArray {
- public:
- // Conservative prediction whether a given handler will locally catch an
- // exception or cause a re-throw to outside the code boundary. Since this is
- // undecidable it is merely an approximation (e.g. useful for debugger).
- enum CatchPrediction {
- UNCAUGHT, // The handler will (likely) rethrow the exception.
- CAUGHT, // The exception will be caught by the handler.
- PROMISE, // The exception will be caught and cause a promise rejection.
- DESUGARING, // The exception will be caught, but both the exception and the
- // catching are part of a desugaring and should therefore not
- // be visible to the user (we won't notify the debugger of such
- // exceptions).
- ASYNC_AWAIT, // The exception will be caught and cause a promise rejection
- // in the desugaring of an async function, so special
- // async/await handling in the debugger can take place.
- };
-
- // Getters for handler table based on ranges.
- inline int GetRangeStart(int index) const;
- inline int GetRangeEnd(int index) const;
- inline int GetRangeHandler(int index) const;
- inline int GetRangeData(int index) const;
-
- // Setters for handler table based on ranges.
- inline void SetRangeStart(int index, int value);
- inline void SetRangeEnd(int index, int value);
- inline void SetRangeHandler(int index, int offset, CatchPrediction pred);
- inline void SetRangeData(int index, int value);
-
- // Setters for handler table based on return addresses.
- inline void SetReturnOffset(int index, int value);
- inline void SetReturnHandler(int index, int offset);
-
- // Lookup handler in a table based on ranges. The {pc_offset} is an offset to
- // the start of the potentially throwing instruction (using return addresses
- // for this value would be invalid).
- int LookupRange(int pc_offset, int* data, CatchPrediction* prediction);
-
- // Lookup handler in a table based on return addresses.
- int LookupReturn(int pc_offset);
-
- // Returns the number of entries in the table.
- inline int NumberOfRangeEntries() const;
-
- // Returns the required length of the underlying fixed array.
- static int LengthForRange(int entries) { return entries * kRangeEntrySize; }
- static int LengthForReturn(int entries) { return entries * kReturnEntrySize; }
-
- // Returns an empty handler table.
- static Handle<HandlerTable> Empty(Isolate* isolate);
-
- DECL_CAST(HandlerTable)
-
-#ifdef ENABLE_DISASSEMBLER
- void HandlerTableRangePrint(std::ostream& os); // NOLINT
- void HandlerTableReturnPrint(std::ostream& os); // NOLINT
-#endif
-
- private:
- // Layout description for handler table based on ranges.
- static const int kRangeStartIndex = 0;
- static const int kRangeEndIndex = 1;
- static const int kRangeHandlerIndex = 2;
- static const int kRangeDataIndex = 3;
- static const int kRangeEntrySize = 4;
-
- // Layout description for handler table based on return addresses.
- static const int kReturnOffsetIndex = 0;
- static const int kReturnHandlerIndex = 1;
- static const int kReturnEntrySize = 2;
-
- // Encoding of the {handler} field.
- class HandlerPredictionField : public BitField<CatchPrediction, 0, 3> {};
- class HandlerOffsetField : public BitField<int, 3, 29> {};
-};
-
// Code describes objects with on-the-fly generated machine code.
class Code : public HeapObject {
public:
@@ -133,27 +48,28 @@ class Code : public HeapObject {
static const char* Kind2String(Kind kind);
-#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
- // Printing
- static const char* ICState2String(InlineCacheState state);
-#endif // defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
-
#ifdef ENABLE_DISASSEMBLER
void Disassemble(const char* name, std::ostream& os,
void* current_pc = nullptr); // NOLINT
#endif
- // [instruction_size]: Size of the native instructions
+ // [instruction_size]: Size of the native instructions, including embedded
+ // data such as the safepoints table.
inline int instruction_size() const;
inline void set_instruction_size(int value);
+ // Returns the size of the native instructions, including embedded
+ // data such as the safepoints table. For off-heap code objects
+ // this may differ from instruction_size in that this will return the size of the
+ // off-heap instruction stream rather than the on-heap trampoline located
+ // at instruction_start.
+ inline int InstructionSize();
+ int OffHeapInstructionSize();
+
// [relocation_info]: Code relocation information
DECL_ACCESSORS(relocation_info, ByteArray)
void InvalidateEmbeddedObjects();
- // [handler_table]: Fixed array containing offsets of exception handlers.
- DECL_ACCESSORS(handler_table, FixedArray)
-
// [deoptimization_data]: Array containing data for deopt.
DECL_ACCESSORS(deoptimization_data, FixedArray)
@@ -245,6 +161,11 @@ class Code : public HeapObject {
inline int safepoint_table_offset() const;
inline void set_safepoint_table_offset(int offset);
+ // [handler_table_offset]: The offset in the instruction stream where the
+ // exception handler table starts.
+ inline int handler_table_offset() const;
+ inline void set_handler_table_offset(int offset);
+
// [marked_for_deoptimization]: For kind OPTIMIZED_FUNCTION tells whether
// the code is going to be deoptimized because of dead embedded maps.
inline bool marked_for_deoptimization() const;
@@ -302,9 +223,21 @@ class Code : public HeapObject {
// Returns the address of the first instruction.
inline byte* instruction_start() const;
+ // Returns the address of the first instruction. For off-heap code objects
+ // this differs from instruction_start (which would point to the off-heap
+ // trampoline instead).
+ inline Address InstructionStart();
+ Address OffHeapInstructionStart();
+
// Returns the address right after the last instruction.
inline byte* instruction_end() const;
+ // Returns the address right after the last instruction. For off-heap code
+ // objects this differs from instruction_end (which would point to the
+ // off-heap trampoline instead).
+ inline Address InstructionEnd();
+ Address OffHeapInstructionEnd();
+
// Returns the size of the instructions, padding, relocation and unwinding
// information.
inline int body_size() const;
@@ -434,9 +367,8 @@ class Code : public HeapObject {
// Layout description.
static const int kRelocationInfoOffset = HeapObject::kHeaderSize;
- static const int kHandlerTableOffset = kRelocationInfoOffset + kPointerSize;
static const int kDeoptimizationDataOffset =
- kHandlerTableOffset + kPointerSize;
+ kRelocationInfoOffset + kPointerSize;
static const int kSourcePositionTableOffset =
kDeoptimizationDataOffset + kPointerSize;
static const int kProtectedInstructionsOffset =
@@ -447,7 +379,9 @@ class Code : public HeapObject {
kCodeDataContainerOffset + kPointerSize;
static const int kFlagsOffset = kInstructionSizeOffset + kIntSize;
static const int kSafepointTableOffsetOffset = kFlagsOffset + kIntSize;
- static const int kStubKeyOffset = kSafepointTableOffsetOffset + kIntSize;
+ static const int kHandlerTableOffsetOffset =
+ kSafepointTableOffsetOffset + kIntSize;
+ static const int kStubKeyOffset = kHandlerTableOffsetOffset + kIntSize;
static const int kConstantPoolOffset = kStubKeyOffset + kIntSize;
static const int kBuiltinIndexOffset =
kConstantPoolOffset + kConstantPoolSize;
@@ -584,7 +518,7 @@ class AbstractCode : public HeapObject {
inline Object* stack_frame_cache();
static void SetStackFrameCache(Handle<AbstractCode> abstract_code,
- Handle<NumberDictionary> cache);
+ Handle<SimpleNumberDictionary> cache);
void DropStackFrameCache();
// Returns the size of instructions and the metadata.
@@ -787,7 +721,7 @@ class BytecodeArray : public FixedArrayBase {
DECL_ACCESSORS(constant_pool, FixedArray)
// Accessors for handler table containing offsets of exception handlers.
- DECL_ACCESSORS(handler_table, FixedArray)
+ DECL_ACCESSORS(handler_table, ByteArray)
// Accessors for source position table containing mappings between byte code
// offset and source position or SourcePositionTableWithFrameCache.
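
With this change the exception handler table is no longer a separate FixedArray hanging off the Code object; it is embedded in the instruction stream and found via the new handler_table_offset int field (which is also why instruction_size now covers embedded data such as tables). A minimal standalone sketch of that addressing, with made-up field names and values (not V8 code):

#include <cstdint>
#include <cstdio>

// Illustrative only: the handler table is addressed by adding the stored
// offset to the start of the instructions, instead of loading a pointer.
struct CodeSketch {
  const uint8_t* instruction_start;  // first byte of the native instructions
  int instruction_size;              // includes embedded data such as tables
  int safepoint_table_offset;        // offset into the instruction stream
  int handler_table_offset;          // new int field from the diff above

  const uint8_t* handler_table_address() const {
    return instruction_start + handler_table_offset;
  }
};

int main() {
  uint8_t stream[128] = {};
  CodeSketch code{stream, 128, 96, 112};
  std::printf("handler table lives %td bytes into the instruction stream\n",
              code.handler_table_address() - code.instruction_start);
}
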
diff --git a/deps/v8/src/objects/compilation-cache.h b/deps/v8/src/objects/compilation-cache.h
index 16bced9998..5836b01091 100644
--- a/deps/v8/src/objects/compilation-cache.h
+++ b/deps/v8/src/objects/compilation-cache.h
@@ -37,21 +37,21 @@ class CompilationCacheShape : public BaseShape<HashTableKey*> {
static const int kEntrySize = 3;
};
-class InfoVectorPair {
+class InfoCellPair {
public:
- InfoVectorPair() : shared_(nullptr), vector_cell_(nullptr) {}
- InfoVectorPair(SharedFunctionInfo* shared, Cell* vector_cell)
- : shared_(shared), vector_cell_(vector_cell) {}
+ InfoCellPair() : shared_(nullptr), feedback_cell_(nullptr) {}
+ InfoCellPair(SharedFunctionInfo* shared, FeedbackCell* feedback_cell)
+ : shared_(shared), feedback_cell_(feedback_cell) {}
+ FeedbackCell* feedback_cell() const { return feedback_cell_; }
SharedFunctionInfo* shared() const { return shared_; }
- Cell* vector() const { return vector_cell_; }
+ bool has_feedback_cell() const { return feedback_cell_ != nullptr; }
bool has_shared() const { return shared_ != nullptr; }
- bool has_vector() const { return vector_cell_ != nullptr; }
private:
SharedFunctionInfo* shared_;
- Cell* vector_cell_;
+ FeedbackCell* feedback_cell_;
};
// This cache is used in two different variants. For regexp caching, it simply
@@ -71,12 +71,12 @@ class CompilationCacheTable
// Find cached value for a string key, otherwise return null.
Handle<Object> Lookup(Handle<String> src, Handle<Context> context,
LanguageMode language_mode);
- InfoVectorPair LookupScript(Handle<String> src, Handle<Context> context,
- LanguageMode language_mode);
- InfoVectorPair LookupEval(Handle<String> src,
- Handle<SharedFunctionInfo> shared,
- Handle<Context> native_context,
- LanguageMode language_mode, int position);
+ MaybeHandle<SharedFunctionInfo> LookupScript(Handle<String> src,
+ Handle<Context> context,
+ LanguageMode language_mode);
+ InfoCellPair LookupEval(Handle<String> src, Handle<SharedFunctionInfo> shared,
+ Handle<Context> native_context,
+ LanguageMode language_mode, int position);
Handle<Object> LookupRegExp(Handle<String> source, JSRegExp::Flags flags);
static Handle<CompilationCacheTable> Put(Handle<CompilationCacheTable> cache,
Handle<String> src,
@@ -86,11 +86,12 @@ class CompilationCacheTable
static Handle<CompilationCacheTable> PutScript(
Handle<CompilationCacheTable> cache, Handle<String> src,
Handle<Context> context, LanguageMode language_mode,
- Handle<SharedFunctionInfo> value, Handle<Cell> literals);
+ Handle<SharedFunctionInfo> value);
static Handle<CompilationCacheTable> PutEval(
Handle<CompilationCacheTable> cache, Handle<String> src,
Handle<SharedFunctionInfo> outer_info, Handle<SharedFunctionInfo> value,
- Handle<Context> native_context, Handle<Cell> literals, int position);
+ Handle<Context> native_context, Handle<FeedbackCell> feedback_cell,
+ int position);
static Handle<CompilationCacheTable> PutRegExp(
Handle<CompilationCacheTable> cache, Handle<String> src,
JSRegExp::Flags flags, Handle<FixedArray> value);
diff --git a/deps/v8/src/objects/data-handler-inl.h b/deps/v8/src/objects/data-handler-inl.h
index 40c3658e60..f0650479f7 100644
--- a/deps/v8/src/objects/data-handler-inl.h
+++ b/deps/v8/src/objects/data-handler-inl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_DATA_HANDLER_INL_H_
-#define V8_DATA_HANDLER_INL_H_
+#ifndef V8_OBJECTS_DATA_HANDLER_INL_H_
+#define V8_OBJECTS_DATA_HANDLER_INL_H_
#include "src/objects/data-handler.h"
@@ -38,4 +38,4 @@ ACCESSORS_CHECKED(DataHandler, data3, Object, kData3Offset,
#include "src/objects/object-macros-undef.h"
-#endif // V8_DATA_HANDLER_INL_H_
+#endif // V8_OBJECTS_DATA_HANDLER_INL_H_
diff --git a/deps/v8/src/objects/data-handler.h b/deps/v8/src/objects/data-handler.h
index f11d00fa38..8b3298207f 100644
--- a/deps/v8/src/objects/data-handler.h
+++ b/deps/v8/src/objects/data-handler.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_DATA_HANDLER_H_
-#define V8_DATA_HANDLER_H_
+#ifndef V8_OBJECTS_DATA_HANDLER_H_
+#define V8_OBJECTS_DATA_HANDLER_H_
#include "src/objects.h"
@@ -60,4 +60,4 @@ class DataHandler : public Struct {
#include "src/objects/object-macros-undef.h"
-#endif // V8_DATA_HANDLER_H_
+#endif // V8_OBJECTS_DATA_HANDLER_H_
diff --git a/deps/v8/src/objects/debug-objects-inl.h b/deps/v8/src/objects/debug-objects-inl.h
index 002ac5215d..084ea7b15c 100644
--- a/deps/v8/src/objects/debug-objects-inl.h
+++ b/deps/v8/src/objects/debug-objects-inl.h
@@ -28,7 +28,7 @@ ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsStateOffset)
ACCESSORS(DebugInfo, coverage_info, Object, kCoverageInfoOffset)
SMI_ACCESSORS(BreakPointInfo, source_position, kSourcePositionOffset)
-ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsOffset)
+ACCESSORS(BreakPointInfo, break_points, Object, kBreakPointsOffset)
SMI_ACCESSORS(BreakPoint, id, kIdOffset)
ACCESSORS(BreakPoint, condition, String, kConditionOffset)
diff --git a/deps/v8/src/objects/debug-objects.cc b/deps/v8/src/objects/debug-objects.cc
index c0425fca8a..6505ca6e7f 100644
--- a/deps/v8/src/objects/debug-objects.cc
+++ b/deps/v8/src/objects/debug-objects.cc
@@ -23,12 +23,30 @@ bool DebugInfo::ClearBreakInfo() {
set_debug_bytecode_array(isolate->heap()->undefined_value());
set_break_points(isolate->heap()->empty_fixed_array());
- int new_flags = flags() & ~kHasBreakInfo & ~kPreparedForBreakpoints;
+ int new_flags = flags();
+ new_flags &= ~kHasBreakInfo & ~kPreparedForBreakpoints;
+ new_flags &= ~kBreakAtEntry & ~kCanBreakAtEntry;
set_flags(new_flags);
return new_flags == kNone;
}
+void DebugInfo::SetBreakAtEntry() {
+ DCHECK(CanBreakAtEntry());
+ set_flags(flags() | kBreakAtEntry);
+}
+
+void DebugInfo::ClearBreakAtEntry() {
+ DCHECK(CanBreakAtEntry());
+ set_flags(flags() & ~kBreakAtEntry);
+}
+
+bool DebugInfo::BreakAtEntry() const { return (flags() & kBreakAtEntry) != 0; }
+
+bool DebugInfo::CanBreakAtEntry() const {
+ return (flags() & kCanBreakAtEntry) != 0;
+}
+
// Check if there is a break point at this source position.
bool DebugInfo::HasBreakPoint(int source_position) {
DCHECK(HasBreakInfo());
@@ -45,14 +63,12 @@ bool DebugInfo::HasBreakPoint(int source_position) {
Object* DebugInfo::GetBreakPointInfo(int source_position) {
DCHECK(HasBreakInfo());
Isolate* isolate = GetIsolate();
- if (!break_points()->IsUndefined(isolate)) {
- for (int i = 0; i < break_points()->length(); i++) {
- if (!break_points()->get(i)->IsUndefined(isolate)) {
- BreakPointInfo* break_point_info =
- BreakPointInfo::cast(break_points()->get(i));
- if (break_point_info->source_position() == source_position) {
- return break_point_info;
- }
+ for (int i = 0; i < break_points()->length(); i++) {
+ if (!break_points()->get(i)->IsUndefined(isolate)) {
+ BreakPointInfo* break_point_info =
+ BreakPointInfo::cast(break_points()->get(i));
+ if (break_point_info->source_position() == source_position) {
+ return break_point_info;
}
}
}
@@ -60,18 +76,16 @@ Object* DebugInfo::GetBreakPointInfo(int source_position) {
}
bool DebugInfo::ClearBreakPoint(Handle<DebugInfo> debug_info,
- Handle<Object> break_point_object) {
+ Handle<BreakPoint> break_point) {
DCHECK(debug_info->HasBreakInfo());
Isolate* isolate = debug_info->GetIsolate();
- if (debug_info->break_points()->IsUndefined(isolate)) return false;
for (int i = 0; i < debug_info->break_points()->length(); i++) {
if (debug_info->break_points()->get(i)->IsUndefined(isolate)) continue;
Handle<BreakPointInfo> break_point_info = Handle<BreakPointInfo>(
BreakPointInfo::cast(debug_info->break_points()->get(i)), isolate);
- if (BreakPointInfo::HasBreakPointObject(break_point_info,
- break_point_object)) {
- BreakPointInfo::ClearBreakPoint(break_point_info, break_point_object);
+ if (BreakPointInfo::HasBreakPoint(break_point_info, break_point)) {
+ BreakPointInfo::ClearBreakPoint(break_point_info, break_point);
return true;
}
}
@@ -79,14 +93,14 @@ bool DebugInfo::ClearBreakPoint(Handle<DebugInfo> debug_info,
}
void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info, int source_position,
- Handle<Object> break_point_object) {
+ Handle<BreakPoint> break_point) {
DCHECK(debug_info->HasBreakInfo());
Isolate* isolate = debug_info->GetIsolate();
Handle<Object> break_point_info(
debug_info->GetBreakPointInfo(source_position), isolate);
if (!break_point_info->IsUndefined(isolate)) {
BreakPointInfo::SetBreakPoint(
- Handle<BreakPointInfo>::cast(break_point_info), break_point_object);
+ Handle<BreakPointInfo>::cast(break_point_info), break_point);
return;
}
@@ -102,8 +116,8 @@ void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info, int source_position,
}
if (index == kNoBreakPointInfo) {
// No free slot - extend break point info array.
- Handle<FixedArray> old_break_points = Handle<FixedArray>(
- FixedArray::cast(debug_info->break_points()), isolate);
+ Handle<FixedArray> old_break_points =
+ Handle<FixedArray>(debug_info->break_points(), isolate);
Handle<FixedArray> new_break_points = isolate->factory()->NewFixedArray(
old_break_points->length() +
DebugInfo::kEstimatedNofBreakPointsInFunction);
@@ -119,27 +133,26 @@ void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info, int source_position,
// Allocate new BreakPointInfo object and set the break point.
Handle<BreakPointInfo> new_break_point_info =
isolate->factory()->NewBreakPointInfo(source_position);
- BreakPointInfo::SetBreakPoint(new_break_point_info, break_point_object);
+ BreakPointInfo::SetBreakPoint(new_break_point_info, break_point);
debug_info->break_points()->set(index, *new_break_point_info);
}
// Get the break point objects for a source position.
-Handle<Object> DebugInfo::GetBreakPointObjects(int source_position) {
+Handle<Object> DebugInfo::GetBreakPoints(int source_position) {
DCHECK(HasBreakInfo());
Object* break_point_info = GetBreakPointInfo(source_position);
Isolate* isolate = GetIsolate();
if (break_point_info->IsUndefined(isolate)) {
return isolate->factory()->undefined_value();
}
- return Handle<Object>(
- BreakPointInfo::cast(break_point_info)->break_point_objects(), isolate);
+ return Handle<Object>(BreakPointInfo::cast(break_point_info)->break_points(),
+ isolate);
}
// Get the total number of break points.
int DebugInfo::GetBreakPointCount() {
DCHECK(HasBreakInfo());
Isolate* isolate = GetIsolate();
- if (break_points()->IsUndefined(isolate)) return 0;
int count = 0;
for (int i = 0; i < break_points()->length(); i++) {
if (!break_points()->get(i)->IsUndefined(isolate)) {
@@ -151,19 +164,16 @@ int DebugInfo::GetBreakPointCount() {
return count;
}
-Handle<Object> DebugInfo::FindBreakPointInfo(
- Handle<DebugInfo> debug_info, Handle<Object> break_point_object) {
+Handle<Object> DebugInfo::FindBreakPointInfo(Handle<DebugInfo> debug_info,
+ Handle<BreakPoint> break_point) {
DCHECK(debug_info->HasBreakInfo());
Isolate* isolate = debug_info->GetIsolate();
- if (!debug_info->break_points()->IsUndefined(isolate)) {
- for (int i = 0; i < debug_info->break_points()->length(); i++) {
- if (!debug_info->break_points()->get(i)->IsUndefined(isolate)) {
- Handle<BreakPointInfo> break_point_info = Handle<BreakPointInfo>(
- BreakPointInfo::cast(debug_info->break_points()->get(i)), isolate);
- if (BreakPointInfo::HasBreakPointObject(break_point_info,
- break_point_object)) {
- return break_point_info;
- }
+ for (int i = 0; i < debug_info->break_points()->length(); i++) {
+ if (!debug_info->break_points()->get(i)->IsUndefined(isolate)) {
+ Handle<BreakPointInfo> break_point_info = Handle<BreakPointInfo>(
+ BreakPointInfo::cast(debug_info->break_points()->get(i)), isolate);
+ if (BreakPointInfo::HasBreakPoint(break_point_info, break_point)) {
+ return break_point_info;
}
}
}
@@ -187,40 +197,34 @@ bool DebugInfo::ClearCoverageInfo() {
}
namespace {
-bool IsEqual(Object* break_point1, Object* break_point2) {
- // TODO(kozyatinskiy): remove non-BreakPoint logic once the JS debug API has
- // been removed.
- if (break_point1->IsBreakPoint() != break_point2->IsBreakPoint())
- return false;
- if (!break_point1->IsBreakPoint()) return break_point1 == break_point2;
- return BreakPoint::cast(break_point1)->id() ==
- BreakPoint::cast(break_point2)->id();
+bool IsEqual(BreakPoint* break_point1, BreakPoint* break_point2) {
+ return break_point1->id() == break_point2->id();
}
} // namespace
// Remove the specified break point object.
void BreakPointInfo::ClearBreakPoint(Handle<BreakPointInfo> break_point_info,
- Handle<Object> break_point_object) {
+ Handle<BreakPoint> break_point) {
Isolate* isolate = break_point_info->GetIsolate();
// If there are no break points just ignore.
- if (break_point_info->break_point_objects()->IsUndefined(isolate)) return;
+ if (break_point_info->break_points()->IsUndefined(isolate)) return;
// If there is a single break point clear it if it is the same.
- if (!break_point_info->break_point_objects()->IsFixedArray()) {
- if (IsEqual(break_point_info->break_point_objects(), *break_point_object)) {
- break_point_info->set_break_point_objects(
- isolate->heap()->undefined_value());
+ if (!break_point_info->break_points()->IsFixedArray()) {
+ if (IsEqual(BreakPoint::cast(break_point_info->break_points()),
+ *break_point)) {
+ break_point_info->set_break_points(isolate->heap()->undefined_value());
}
return;
}
// If there are multiple break points shrink the array
- DCHECK(break_point_info->break_point_objects()->IsFixedArray());
- Handle<FixedArray> old_array = Handle<FixedArray>(
- FixedArray::cast(break_point_info->break_point_objects()));
+ DCHECK(break_point_info->break_points()->IsFixedArray());
+ Handle<FixedArray> old_array =
+ Handle<FixedArray>(FixedArray::cast(break_point_info->break_points()));
Handle<FixedArray> new_array =
isolate->factory()->NewFixedArray(old_array->length() - 1);
int found_count = 0;
for (int i = 0; i < old_array->length(); i++) {
- if (IsEqual(old_array->get(i), *break_point_object)) {
+ if (IsEqual(BreakPoint::cast(old_array->get(i)), *break_point)) {
DCHECK_EQ(found_count, 0);
found_count++;
} else {
@@ -228,61 +232,60 @@ void BreakPointInfo::ClearBreakPoint(Handle<BreakPointInfo> break_point_info,
}
}
// If the break point was found in the list change it.
- if (found_count > 0) break_point_info->set_break_point_objects(*new_array);
+ if (found_count > 0) break_point_info->set_break_points(*new_array);
}
// Add the specified break point object.
void BreakPointInfo::SetBreakPoint(Handle<BreakPointInfo> break_point_info,
- Handle<Object> break_point_object) {
+ Handle<BreakPoint> break_point) {
Isolate* isolate = break_point_info->GetIsolate();
// If there was no break point objects before just set it.
- if (break_point_info->break_point_objects()->IsUndefined(isolate)) {
- break_point_info->set_break_point_objects(*break_point_object);
+ if (break_point_info->break_points()->IsUndefined(isolate)) {
+ break_point_info->set_break_points(*break_point);
return;
}
// If the break point object is the same as before just ignore.
- if (break_point_info->break_point_objects() == *break_point_object) return;
+ if (break_point_info->break_points() == *break_point) return;
// If there was one break point object before replace with array.
- if (!break_point_info->break_point_objects()->IsFixedArray()) {
+ if (!break_point_info->break_points()->IsFixedArray()) {
Handle<FixedArray> array = isolate->factory()->NewFixedArray(2);
- array->set(0, break_point_info->break_point_objects());
- array->set(1, *break_point_object);
- break_point_info->set_break_point_objects(*array);
+ array->set(0, break_point_info->break_points());
+ array->set(1, *break_point);
+ break_point_info->set_break_points(*array);
return;
}
// If there was more than one break point before extend array.
- Handle<FixedArray> old_array = Handle<FixedArray>(
- FixedArray::cast(break_point_info->break_point_objects()));
+ Handle<FixedArray> old_array =
+ Handle<FixedArray>(FixedArray::cast(break_point_info->break_points()));
Handle<FixedArray> new_array =
isolate->factory()->NewFixedArray(old_array->length() + 1);
for (int i = 0; i < old_array->length(); i++) {
// If the break point was there before just ignore.
- if (IsEqual(old_array->get(i), *break_point_object)) return;
+ if (IsEqual(BreakPoint::cast(old_array->get(i)), *break_point)) return;
new_array->set(i, old_array->get(i));
}
// Add the new break point.
- new_array->set(old_array->length(), *break_point_object);
- break_point_info->set_break_point_objects(*new_array);
+ new_array->set(old_array->length(), *break_point);
+ break_point_info->set_break_points(*new_array);
}
-bool BreakPointInfo::HasBreakPointObject(
- Handle<BreakPointInfo> break_point_info,
- Handle<Object> break_point_object) {
+bool BreakPointInfo::HasBreakPoint(Handle<BreakPointInfo> break_point_info,
+ Handle<BreakPoint> break_point) {
// No break point.
Isolate* isolate = break_point_info->GetIsolate();
- if (break_point_info->break_point_objects()->IsUndefined(isolate)) {
+ if (break_point_info->break_points()->IsUndefined(isolate)) {
return false;
}
// Single break point.
- if (!break_point_info->break_point_objects()->IsFixedArray()) {
- return IsEqual(break_point_info->break_point_objects(),
- *break_point_object);
+ if (!break_point_info->break_points()->IsFixedArray()) {
+ return IsEqual(BreakPoint::cast(break_point_info->break_points()),
+ *break_point);
}
// Multiple break points.
- FixedArray* array = FixedArray::cast(break_point_info->break_point_objects());
+ FixedArray* array = FixedArray::cast(break_point_info->break_points());
for (int i = 0; i < array->length(); i++) {
- if (IsEqual(array->get(i), *break_point_object)) {
+ if (IsEqual(BreakPoint::cast(array->get(i)), *break_point)) {
return true;
}
}
@@ -292,11 +295,11 @@ bool BreakPointInfo::HasBreakPointObject(
// Get the number of break points.
int BreakPointInfo::GetBreakPointCount() {
// No break point.
- if (break_point_objects()->IsUndefined(GetIsolate())) return 0;
+ if (break_points()->IsUndefined(GetIsolate())) return 0;
// Single break point.
- if (!break_point_objects()->IsFixedArray()) return 1;
+ if (!break_points()->IsFixedArray()) return 1;
// Multiple break points.
- return FixedArray::cast(break_point_objects())->length();
+ return FixedArray::cast(break_points())->length();
}
int CoverageInfo::SlotCount() const {
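
BreakPointInfo keeps its break points in three shapes: undefined when empty, a single BreakPoint, and a FixedArray once there is more than one, with SetBreakPoint and ClearBreakPoint promoting and shrinking between them and comparing break points by id. A standalone sketch of that storage strategy using plain C++ containers (illustrative only, not the V8 representation):

#include <algorithm>
#include <cassert>
#include <vector>

// Illustrative stand-in for BreakPoint; the code above compares by id.
struct BreakPoint { int id; };

// Holds zero, one, or many break points, mirroring the
// undefined / single object / FixedArray shapes in the diff above.
class BreakPointSet {
 public:
  void Set(BreakPoint bp) {
    // Ignore duplicates, as BreakPointInfo::SetBreakPoint does.
    for (const BreakPoint& existing : points_)
      if (existing.id == bp.id) return;
    points_.push_back(bp);
  }
  bool Clear(BreakPoint bp) {
    auto it = std::find_if(points_.begin(), points_.end(),
                           [&](const BreakPoint& p) { return p.id == bp.id; });
    if (it == points_.end()) return false;
    points_.erase(it);  // the real code copies into a smaller FixedArray
    return true;
  }
  int GetBreakPointCount() const { return static_cast<int>(points_.size()); }

 private:
  std::vector<BreakPoint> points_;
};

int main() {
  BreakPointSet set;
  set.Set({1});
  set.Set({2});
  set.Set({2});  // duplicate, ignored
  assert(set.GetBreakPointCount() == 2);
  assert(set.Clear({1}));
  assert(set.GetBreakPointCount() == 1);
}
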
diff --git a/deps/v8/src/objects/debug-objects.h b/deps/v8/src/objects/debug-objects.h
index 0ce134b0b3..767cd7e81b 100644
--- a/deps/v8/src/objects/debug-objects.h
+++ b/deps/v8/src/objects/debug-objects.h
@@ -14,6 +14,7 @@
namespace v8 {
namespace internal {
+class BreakPoint;
class BytecodeArray;
// The DebugInfo class holds additional information for a function being
@@ -24,7 +25,9 @@ class DebugInfo : public Struct {
kNone = 0,
kHasBreakInfo = 1 << 0,
kPreparedForBreakpoints = 1 << 1,
- kHasCoverageInfo = 2 << 1,
+ kHasCoverageInfo = 1 << 2,
+ kBreakAtEntry = 1 << 3,
+ kCanBreakAtEntry = 1 << 4
};
typedef base::Flags<Flag> Flags;
@@ -51,6 +54,12 @@ class DebugInfo : public Struct {
// DebugInfo is now empty.
bool ClearBreakInfo();
+ // Accessors to flag whether to break before entering the function.
+ // This is used to break for functions with no source, e.g. builtins.
+ void SetBreakAtEntry();
+ void ClearBreakAtEntry();
+ bool BreakAtEntry() const;
+
// The instrumented bytecode array for functions with break points.
DECL_ACCESSORS(debug_bytecode_array, Object)
@@ -61,15 +70,15 @@ class DebugInfo : public Struct {
bool HasBreakPoint(int source_position);
// Attempt to clear a break point. Return true if successful.
static bool ClearBreakPoint(Handle<DebugInfo> debug_info,
- Handle<Object> break_point_object);
+ Handle<BreakPoint> break_point);
// Set a break point.
static void SetBreakPoint(Handle<DebugInfo> debug_info, int source_position,
- Handle<Object> break_point_object);
+ Handle<BreakPoint> break_point);
// Get the break point objects for a source position.
- Handle<Object> GetBreakPointObjects(int source_position);
+ Handle<Object> GetBreakPoints(int source_position);
// Find the break point info holding this break point object.
static Handle<Object> FindBreakPointInfo(Handle<DebugInfo> debug_info,
- Handle<Object> break_point_object);
+ Handle<BreakPoint> break_point);
// Get the number of break points for this function.
int GetBreakPointCount();
@@ -78,6 +87,10 @@ class DebugInfo : public Struct {
inline BytecodeArray* OriginalBytecodeArray();
inline BytecodeArray* DebugBytecodeArray();
+ // Returns whether we should be able to break before entering the function.
+ // This is true for functions with no source, e.g. builtins.
+ bool CanBreakAtEntry() const;
+
// --- Block Coverage ---
// ----------------------
@@ -122,17 +135,17 @@ class BreakPointInfo : public Tuple2 {
// The position in the source for the break position.
DECL_INT_ACCESSORS(source_position)
// List of related JavaScript break points.
- DECL_ACCESSORS(break_point_objects, Object)
+ DECL_ACCESSORS(break_points, Object)
// Removes a break point.
static void ClearBreakPoint(Handle<BreakPointInfo> info,
- Handle<Object> break_point_object);
+ Handle<BreakPoint> break_point);
// Set a break point.
static void SetBreakPoint(Handle<BreakPointInfo> info,
- Handle<Object> break_point_object);
- // Check if break point info has this break point object.
- static bool HasBreakPointObject(Handle<BreakPointInfo> info,
- Handle<Object> break_point_object);
+ Handle<BreakPoint> break_point);
+ // Check if break point info has this break point.
+ static bool HasBreakPoint(Handle<BreakPointInfo> info,
+ Handle<BreakPoint> break_point);
// Get the number of break points for this code offset.
int GetBreakPointCount();
@@ -141,7 +154,7 @@ class BreakPointInfo : public Tuple2 {
DECL_CAST(BreakPointInfo)
static const int kSourcePositionOffset = kValue1Offset;
- static const int kBreakPointObjectsOffset = kValue2Offset;
+ static const int kBreakPointsOffset = kValue2Offset;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(BreakPointInfo);
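
The DebugInfo flags are plain bit flags, so the new accessors reduce to mask operations: SetBreakAtEntry/ClearBreakAtEntry toggle one bit and ClearBreakInfo now masks out the break-at-entry bits as well. A small standalone sketch of the same bit arithmetic, using the flag values from this header (illustrative, not V8 code):

#include <cassert>

// Values taken from the DebugInfo::Flag enum in the diff above.
enum Flag {
  kNone = 0,
  kHasBreakInfo = 1 << 0,
  kPreparedForBreakpoints = 1 << 1,
  kHasCoverageInfo = 1 << 2,
  kBreakAtEntry = 1 << 3,
  kCanBreakAtEntry = 1 << 4,
};

int main() {
  int flags = kHasBreakInfo | kCanBreakAtEntry;

  // SetBreakAtEntry() / ClearBreakAtEntry() just toggle one bit.
  flags |= kBreakAtEntry;
  assert((flags & kBreakAtEntry) != 0);  // BreakAtEntry()
  flags &= ~kBreakAtEntry;
  assert((flags & kBreakAtEntry) == 0);

  // ClearBreakInfo() now also drops the break-at-entry bits.
  flags &= ~kHasBreakInfo & ~kPreparedForBreakpoints;
  flags &= ~kBreakAtEntry & ~kCanBreakAtEntry;
  assert(flags == kNone);
}
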
diff --git a/deps/v8/src/objects/dictionary.h b/deps/v8/src/objects/dictionary.h
index 5cf6bfb67d..7cc0e5f5b3 100644
--- a/deps/v8/src/objects/dictionary.h
+++ b/deps/v8/src/objects/dictionary.h
@@ -228,26 +228,70 @@ class GlobalDictionary
inline void ValueAtPut(int entry, Object* value);
};
-class NumberDictionaryShape : public BaseDictionaryShape<uint32_t> {
+class NumberDictionaryBaseShape : public BaseDictionaryShape<uint32_t> {
public:
- static const int kPrefixSize = 1;
- static const int kEntrySize = 3;
-
static inline bool IsMatch(uint32_t key, Object* other);
static inline Handle<Object> AsHandle(Isolate* isolate, uint32_t key);
static inline uint32_t Hash(Isolate* isolate, uint32_t key);
static inline uint32_t HashForObject(Isolate* isolate, Object* object);
+};
+
+class NumberDictionaryShape : public NumberDictionaryBaseShape {
+ public:
+ static const int kPrefixSize = 1;
+ static const int kEntrySize = 3;
+
+ static inline int GetMapRootIndex();
+};
+
+class SimpleNumberDictionaryShape : public NumberDictionaryBaseShape {
+ public:
+ static const bool kHasDetails = false;
+ static const int kPrefixSize = 0;
+ static const int kEntrySize = 2;
+
+ template <typename Dictionary>
+ static inline PropertyDetails DetailsAt(Dictionary* dict, int entry) {
+ UNREACHABLE();
+ }
+
+ template <typename Dictionary>
+ static inline void DetailsAtPut(Dictionary* dict, int entry,
+ PropertyDetails value) {
+ UNREACHABLE();
+ }
static inline int GetMapRootIndex();
};
+extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ HashTable<SimpleNumberDictionary, SimpleNumberDictionaryShape>;
+
+extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Dictionary<SimpleNumberDictionary, SimpleNumberDictionaryShape>;
+
+// SimpleNumberDictionary is used to map number to an entry.
+class SimpleNumberDictionary
+ : public Dictionary<SimpleNumberDictionary, SimpleNumberDictionaryShape> {
+ public:
+ DECL_CAST(SimpleNumberDictionary)
+ // Type specific at put (default NONE attributes is used when adding).
+ MUST_USE_RESULT static Handle<SimpleNumberDictionary> Set(
+ Handle<SimpleNumberDictionary> dictionary, uint32_t key,
+ Handle<Object> value);
+
+ static const int kEntryValueIndex = 1;
+};
+
extern template class EXPORT_TEMPLATE_DECLARE(
V8_EXPORT_PRIVATE) HashTable<NumberDictionary, NumberDictionaryShape>;
extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
Dictionary<NumberDictionary, NumberDictionaryShape>;
+// NumberDictionary is used as elements backing store and provides a bitfield
+// and stores property details for every entry.
class NumberDictionary
: public Dictionary<NumberDictionary, NumberDictionaryShape> {
public:
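
The difference between the two shapes is the per-entry layout: NumberDictionary keeps a third PropertyDetails slot per entry (plus a one-element prefix), while SimpleNumberDictionary stores bare key/value pairs. A standalone sketch of what those entry sizes mean for a flat backing store (a simplified picture; the real HashTable also reserves a few header slots ahead of the prefix):

#include <cstdio>

// Per-entry slot counts from the two shapes above.
constexpr int kSimpleEntrySize = 2;  // key, value
constexpr int kNumberEntrySize = 3;  // key, value, PropertyDetails

// Index of an entry's first slot in a flat backing array with the given
// prefix size (simplified; illustrative only).
constexpr int EntryToIndex(int entry, int prefix_size, int entry_size) {
  return prefix_size + entry * entry_size;
}

int main() {
  // SimpleNumberDictionary: no prefix, two slots per entry.
  std::printf("simple entry 5 starts at slot %d\n",
              EntryToIndex(5, /*prefix_size=*/0, kSimpleEntrySize));
  // NumberDictionary: one prefix slot, three slots per entry.
  std::printf("number entry 5 starts at slot %d\n",
              EntryToIndex(5, /*prefix_size=*/1, kNumberEntrySize));
}
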
diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h
index edca36c92e..bee28d93e2 100644
--- a/deps/v8/src/objects/fixed-array-inl.h
+++ b/deps/v8/src/objects/fixed-array-inl.h
@@ -496,6 +496,16 @@ inline uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from(int value) {
return static_cast<uint8_t>(value);
}
+template <>
+inline int64_t FixedTypedArray<BigInt64ArrayTraits>::from(int value) {
+ UNREACHABLE();
+}
+
+template <>
+inline uint64_t FixedTypedArray<BigUint64ArrayTraits>::from(int value) {
+ UNREACHABLE();
+}
+
template <class Traits>
typename Traits::ElementType FixedTypedArray<Traits>::from(uint32_t value) {
return static_cast<ElementType>(value);
@@ -509,6 +519,16 @@ inline uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from(uint32_t value) {
return static_cast<uint8_t>(value);
}
+template <>
+inline int64_t FixedTypedArray<BigInt64ArrayTraits>::from(uint32_t value) {
+ UNREACHABLE();
+}
+
+template <>
+inline uint64_t FixedTypedArray<BigUint64ArrayTraits>::from(uint32_t value) {
+ UNREACHABLE();
+}
+
template <class Traits>
typename Traits::ElementType FixedTypedArray<Traits>::from(double value) {
return static_cast<ElementType>(DoubleToInt32(value));
@@ -523,6 +543,16 @@ inline uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from(double value) {
}
template <>
+inline int64_t FixedTypedArray<BigInt64ArrayTraits>::from(double value) {
+ UNREACHABLE();
+}
+
+template <>
+inline uint64_t FixedTypedArray<BigUint64ArrayTraits>::from(double value) {
+ UNREACHABLE();
+}
+
+template <>
inline float FixedTypedArray<Float32ArrayTraits>::from(double value) {
return static_cast<float>(value);
}
@@ -533,6 +563,60 @@ inline double FixedTypedArray<Float64ArrayTraits>::from(double value) {
}
template <class Traits>
+typename Traits::ElementType FixedTypedArray<Traits>::from(int64_t value) {
+ UNREACHABLE();
+}
+
+template <class Traits>
+typename Traits::ElementType FixedTypedArray<Traits>::from(uint64_t value) {
+ UNREACHABLE();
+}
+
+template <>
+inline int64_t FixedTypedArray<BigInt64ArrayTraits>::from(int64_t value) {
+ return value;
+}
+
+template <>
+inline uint64_t FixedTypedArray<BigUint64ArrayTraits>::from(uint64_t value) {
+ return value;
+}
+
+template <>
+inline uint64_t FixedTypedArray<BigUint64ArrayTraits>::from(int64_t value) {
+ return static_cast<uint64_t>(value);
+}
+
+template <>
+inline int64_t FixedTypedArray<BigInt64ArrayTraits>::from(uint64_t value) {
+ return static_cast<int64_t>(value);
+}
+
+template <class Traits>
+typename Traits::ElementType FixedTypedArray<Traits>::FromHandle(
+ Handle<Object> value, bool* lossless) {
+ if (value->IsSmi()) {
+ return from(Smi::ToInt(*value));
+ }
+ DCHECK(value->IsHeapNumber());
+ return from(HeapNumber::cast(*value)->value());
+}
+
+template <>
+inline int64_t FixedTypedArray<BigInt64ArrayTraits>::FromHandle(
+ Handle<Object> value, bool* lossless) {
+ DCHECK(value->IsBigInt());
+ return BigInt::cast(*value)->AsInt64(lossless);
+}
+
+template <>
+inline uint64_t FixedTypedArray<BigUint64ArrayTraits>::FromHandle(
+ Handle<Object> value, bool* lossless) {
+ DCHECK(value->IsBigInt());
+ return BigInt::cast(*value)->AsUint64(lossless);
+}
+
+template <class Traits>
Handle<Object> FixedTypedArray<Traits>::get(FixedTypedArray<Traits>* array,
int index) {
return Traits::ToHandle(array->GetIsolate(), array->get_scalar(index));
@@ -555,6 +639,20 @@ void FixedTypedArray<Traits>::SetValue(uint32_t index, Object* value) {
set(index, cast_value);
}
+template <>
+inline void FixedTypedArray<BigInt64ArrayTraits>::SetValue(uint32_t index,
+ Object* value) {
+ DCHECK(value->IsBigInt());
+ set(index, BigInt::cast(value)->AsInt64());
+}
+
+template <>
+inline void FixedTypedArray<BigUint64ArrayTraits>::SetValue(uint32_t index,
+ Object* value) {
+ DCHECK(value->IsBigInt());
+ set(index, BigInt::cast(value)->AsUint64());
+}
+
Handle<Object> Uint8ArrayTraits::ToHandle(Isolate* isolate, uint8_t scalar) {
return handle(Smi::FromInt(scalar), isolate);
}
@@ -592,6 +690,15 @@ Handle<Object> Float64ArrayTraits::ToHandle(Isolate* isolate, double scalar) {
return isolate->factory()->NewNumber(scalar);
}
+Handle<Object> BigInt64ArrayTraits::ToHandle(Isolate* isolate, int64_t scalar) {
+ return BigInt::FromInt64(isolate, scalar);
+}
+
+Handle<Object> BigUint64ArrayTraits::ToHandle(Isolate* isolate,
+ uint64_t scalar) {
+ return BigInt::FromUint64(isolate, scalar);
+}
+
// static
template <class Traits>
STATIC_CONST_MEMBER_DEFINITION const InstanceType
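
The new BigInt64/BigUint64 specializations convert between int64_t and uint64_t with plain static_casts, and FromHandle reports via the lossless flag whether the stored value round-trips. A standalone sketch of what "lossless" means for such conversions (illustrative, not the BigInt implementation):

#include <cassert>
#include <cstdint>

// Store a signed 64-bit value into an unsigned element, reporting whether the
// conversion is lossless (i.e. the value was non-negative).
uint64_t StoreAsUint64(int64_t value, bool* lossless) {
  if (lossless) *lossless = value >= 0;
  return static_cast<uint64_t>(value);  // wraps modulo 2^64 if negative
}

// Store an unsigned 64-bit value into a signed element, reporting whether it
// fits in int64_t.
int64_t StoreAsInt64(uint64_t value, bool* lossless) {
  if (lossless) *lossless = value <= static_cast<uint64_t>(INT64_MAX);
  return static_cast<int64_t>(value);
}

int main() {
  bool lossless = false;
  assert(StoreAsUint64(42, &lossless) == 42 && lossless);
  StoreAsUint64(-1, &lossless);
  assert(!lossless);  // -1 wraps to 0xFFFFFFFFFFFFFFFF
  StoreAsInt64(UINT64_MAX, &lossless);
  assert(!lossless);  // does not fit in int64_t
}
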
diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h
index 5d78af8799..1861f0c735 100644
--- a/deps/v8/src/objects/fixed-array.h
+++ b/deps/v8/src/objects/fixed-array.h
@@ -103,7 +103,8 @@ class FixedArray : public FixedArrayBase {
// Return a grown copy if the index is bigger than the array's length.
static Handle<FixedArray> SetAndGrow(Handle<FixedArray> array, int index,
- Handle<Object> value);
+ Handle<Object> value,
+ PretenureFlag pretenure = NOT_TENURED);
// Setter that uses write barrier.
inline void set(int index, Object* value);
@@ -466,16 +467,18 @@ class PodArray : public ByteArray {
};
// V has parameters (Type, type, TYPE, C type, element_size)
-#define TYPED_ARRAYS(V) \
- V(Uint8, uint8, UINT8, uint8_t, 1) \
- V(Int8, int8, INT8, int8_t, 1) \
- V(Uint16, uint16, UINT16, uint16_t, 2) \
- V(Int16, int16, INT16, int16_t, 2) \
- V(Uint32, uint32, UINT32, uint32_t, 4) \
- V(Int32, int32, INT32, int32_t, 4) \
- V(Float32, float32, FLOAT32, float, 4) \
- V(Float64, float64, FLOAT64, double, 8) \
- V(Uint8Clamped, uint8_clamped, UINT8_CLAMPED, uint8_t, 1)
+#define TYPED_ARRAYS(V) \
+ V(Uint8, uint8, UINT8, uint8_t, 1) \
+ V(Int8, int8, INT8, int8_t, 1) \
+ V(Uint16, uint16, UINT16, uint16_t, 2) \
+ V(Int16, int16, INT16, int16_t, 2) \
+ V(Uint32, uint32, UINT32, uint32_t, 4) \
+ V(Int32, int32, INT32, int32_t, 4) \
+ V(Float32, float32, FLOAT32, float, 4) \
+ V(Float64, float64, FLOAT64, double, 8) \
+ V(Uint8Clamped, uint8_clamped, UINT8_CLAMPED, uint8_t, 1) \
+ V(BigUint64, biguint64, BIGUINT64, uint64_t, 8) \
+ V(BigInt64, bigint64, BIGINT64, int64_t, 8)
class FixedTypedArrayBase : public FixedArrayBase {
public:
@@ -548,6 +551,11 @@ class FixedTypedArray : public FixedTypedArrayBase {
static inline ElementType from(int value);
static inline ElementType from(uint32_t value);
static inline ElementType from(double value);
+ static inline ElementType from(int64_t value);
+ static inline ElementType from(uint64_t value);
+
+ static inline ElementType FromHandle(Handle<Object> value,
+ bool* lossless = nullptr);
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
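
TYPED_ARRAYS is an X-macro: callers pass a macro V and get one expansion per element type, which is how the two new BigInt64/BigUint64 rows automatically flow into every switch and table built from the list. A stripped-down standalone example of the pattern (the real list and its consumers live in V8; this one is purely illustrative):

#include <cstdio>

// A small X-macro in the style of TYPED_ARRAYS(V): (Type, c type, size).
#define MY_TYPED_ARRAYS(V) \
  V(Uint8, uint8_t, 1)     \
  V(Float64, double, 8)    \
  V(BigInt64, int64_t, 8)

// One consumer: generate an enum of element kinds.
enum ElementKind {
#define KIND(Type, ctype, size) k##Type,
  MY_TYPED_ARRAYS(KIND)
#undef KIND
};

// Another consumer: map each kind to its element size.
int ElementSize(ElementKind kind) {
  switch (kind) {
#define CASE(Type, ctype, size) \
  case k##Type:                 \
    return size;
    MY_TYPED_ARRAYS(CASE)
#undef CASE
  }
  return 0;
}

int main() {
  std::printf("BigInt64 element size: %d\n", ElementSize(kBigInt64));
}
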
diff --git a/deps/v8/src/objects/intl-objects.cc b/deps/v8/src/objects/intl-objects.cc
index 9688717e76..6b8e18014a 100644
--- a/deps/v8/src/objects/intl-objects.cc
+++ b/deps/v8/src/objects/intl-objects.cc
@@ -12,6 +12,7 @@
#include "src/api.h"
#include "src/factory.h"
+#include "src/global-handles.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/property-descriptor.h"
diff --git a/deps/v8/src/objects/js-array-inl.h b/deps/v8/src/objects/js-array-inl.h
index 1128e190b2..6bba2f0054 100644
--- a/deps/v8/src/objects/js-array-inl.h
+++ b/deps/v8/src/objects/js-array-inl.h
@@ -204,15 +204,6 @@ void JSTypedArray::set_length(Object* value, WriteBarrierMode mode) {
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kLengthOffset, value, mode);
}
-bool JSTypedArray::HasJSTypedArrayPrototype(Isolate* isolate) {
- DisallowHeapAllocation no_gc;
- Object* proto = map()->prototype();
- if (!proto->IsJSObject()) return false;
-
- JSObject* proto_obj = JSObject::cast(proto);
- return proto_obj->map()->prototype() == *isolate->typed_array_prototype();
-}
-
// static
MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
Handle<Object> receiver,
@@ -236,26 +227,6 @@ MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
return array;
}
-// static
-Handle<JSFunction> JSTypedArray::DefaultConstructor(
- Isolate* isolate, Handle<JSTypedArray> exemplar) {
- Handle<JSFunction> default_ctor = isolate->uint8_array_fun();
- switch (exemplar->type()) {
-#define TYPED_ARRAY_CTOR(Type, type, TYPE, ctype, size) \
- case kExternal##Type##Array: { \
- default_ctor = isolate->type##_array_fun(); \
- break; \
- }
-
- TYPED_ARRAYS(TYPED_ARRAY_CTOR)
-#undef TYPED_ARRAY_CTOR
- default:
- UNREACHABLE();
- }
-
- return default_ctor;
-}
-
#ifdef VERIFY_HEAP
ACCESSORS(JSTypedArray, raw_length, Object, kLengthOffset)
#endif
diff --git a/deps/v8/src/objects/js-array.h b/deps/v8/src/objects/js-array.h
index 806c275c8f..09a54b38c1 100644
--- a/deps/v8/src/objects/js-array.h
+++ b/deps/v8/src/objects/js-array.h
@@ -299,22 +299,9 @@ class JSTypedArray : public JSArrayBufferView {
Handle<JSArrayBuffer> GetBuffer();
- inline bool HasJSTypedArrayPrototype(Isolate* isolate);
static inline MaybeHandle<JSTypedArray> Validate(Isolate* isolate,
Handle<Object> receiver,
const char* method_name);
- static inline Handle<JSFunction> DefaultConstructor(
- Isolate* isolate, Handle<JSTypedArray> exemplar);
- // ES7 section 22.2.4.6 Create ( constructor, argumentList )
- static MaybeHandle<JSTypedArray> Create(Isolate* isolate,
- Handle<Object> default_ctor, int argc,
- Handle<Object>* argv,
- const char* method_name);
- // ES7 section 22.2.4.7 TypedArraySpeciesCreate ( exemplar, argumentList )
- static MaybeHandle<JSTypedArray> SpeciesCreate(Isolate* isolate,
- Handle<JSTypedArray> exemplar,
- int argc, Handle<Object>* argv,
- const char* method_name);
// Dispatched behavior.
DECL_PRINTER(JSTypedArray)
diff --git a/deps/v8/src/objects/js-promise-inl.h b/deps/v8/src/objects/js-promise-inl.h
new file mode 100644
index 0000000000..afe297b880
--- /dev/null
+++ b/deps/v8/src/objects/js-promise-inl.h
@@ -0,0 +1,40 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_PROMISE_INL_H_
+#define V8_OBJECTS_JS_PROMISE_INL_H_
+
+#include "src/objects.h"
+#include "src/objects/js-promise.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+TYPE_CHECKER(JSPromise, JS_PROMISE_TYPE)
+CAST_ACCESSOR(JSPromise)
+
+ACCESSORS(JSPromise, reactions_or_result, Object, kReactionsOrResultOffset)
+SMI_ACCESSORS(JSPromise, flags, kFlagsOffset)
+BOOL_ACCESSORS(JSPromise, flags, has_handler, kHasHandlerBit)
+BOOL_ACCESSORS(JSPromise, flags, handled_hint, kHandledHintBit)
+
+Object* JSPromise::result() const {
+ DCHECK_NE(Promise::kPending, status());
+ return reactions_or_result();
+}
+
+Object* JSPromise::reactions() const {
+ DCHECK_EQ(Promise::kPending, status());
+ return reactions_or_result();
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_PROMISE_INL_H_
diff --git a/deps/v8/src/objects/js-promise.h b/deps/v8/src/objects/js-promise.h
new file mode 100644
index 0000000000..b454084b8e
--- /dev/null
+++ b/deps/v8/src/objects/js-promise.h
@@ -0,0 +1,105 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_PROMISE_H_
+#define V8_OBJECTS_JS_PROMISE_H_
+
+#include "src/objects.h"
+#include "src/objects/promise.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// Representation of promise objects in the specification. Our layout of
+// JSPromise differs a bit from the layout in the specification, for example
+// there's only a single list of PromiseReaction objects, instead of separate
+// lists for fulfill and reject reactions. The PromiseReaction carries both
+// callbacks from the start, and is eventually morphed into the proper kind of
+// PromiseReactionJobTask when the JSPromise is settled.
+//
+// We also overlay the result and reactions fields on the JSPromise, since
+// the reactions are only necessary for pending promises, whereas the result
+// is only meaningful for settled promises.
+class JSPromise : public JSObject {
+ public:
+ // [reactions_or_result]: Smi 0 terminated list of PromiseReaction objects
+ // in case the JSPromise was not settled yet, otherwise the result.
+ DECL_ACCESSORS(reactions_or_result, Object)
+
+ // [result]: Checks that the promise is settled and returns the result.
+ inline Object* result() const;
+
+ // [reactions]: Checks that the promise is pending and returns the reactions.
+ inline Object* reactions() const;
+
+ DECL_INT_ACCESSORS(flags)
+
+ // [has_handler]: Whether this promise has a reject handler or not.
+ DECL_BOOLEAN_ACCESSORS(has_handler)
+
+ // [handled_hint]: Whether this promise will be handled by a catch
+ // block in an async function.
+ DECL_BOOLEAN_ACCESSORS(handled_hint)
+
+ static const char* Status(Promise::PromiseState status);
+ Promise::PromiseState status() const;
+ void set_status(Promise::PromiseState status);
+
+ // ES section #sec-fulfillpromise
+ static Handle<Object> Fulfill(Handle<JSPromise> promise,
+ Handle<Object> value);
+ // ES section #sec-rejectpromise
+ static Handle<Object> Reject(Handle<JSPromise> promise, Handle<Object> reason,
+ bool debug_event = true);
+ // ES section #sec-promise-resolve-functions
+ MUST_USE_RESULT static MaybeHandle<Object> Resolve(Handle<JSPromise> promise,
+ Handle<Object> resolution);
+
+ // This is a helper that extracts the JSPromise from the input
+ // {object}, which is used as a payload for PromiseReaction and
+ // PromiseReactionJobTask.
+ MUST_USE_RESULT static MaybeHandle<JSPromise> From(Handle<HeapObject> object);
+
+ DECL_CAST(JSPromise)
+
+ // Dispatched behavior.
+ DECL_PRINTER(JSPromise)
+ DECL_VERIFIER(JSPromise)
+
+ // Layout description.
+ static const int kReactionsOrResultOffset = JSObject::kHeaderSize;
+ static const int kFlagsOffset = kReactionsOrResultOffset + kPointerSize;
+ static const int kSize = kFlagsOffset + kPointerSize;
+ static const int kSizeWithEmbedderFields =
+ kSize + v8::Promise::kEmbedderFieldCount * kPointerSize;
+
+ // Flags layout.
+ // The first two bits store the v8::Promise::PromiseState.
+ static const int kStatusBits = 2;
+ static const int kHasHandlerBit = 2;
+ static const int kHandledHintBit = 3;
+
+ static const int kStatusShift = 0;
+ static const int kStatusMask = 0x3;
+ STATIC_ASSERT(v8::Promise::kPending == 0);
+ STATIC_ASSERT(v8::Promise::kFulfilled == 1);
+ STATIC_ASSERT(v8::Promise::kRejected == 2);
+
+ private:
+ // ES section #sec-triggerpromisereactions
+ static Handle<Object> TriggerPromiseReactions(Isolate* isolate,
+ Handle<Object> reactions,
+ Handle<Object> argument,
+ PromiseReaction::Type type);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_PROMISE_H_
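
The flags Smi packs the promise state into the two low bits and the two boolean flags into the bits above it, so status() and set_status() reduce to a mask-and-shift. A standalone sketch of that packing, using the constants from this header (illustrative only, not the V8 accessors):

#include <cassert>

// Constants from the JSPromise flags layout above.
constexpr int kStatusShift = 0;
constexpr int kStatusMask = 0x3;  // two bits for v8::Promise::PromiseState
constexpr int kHasHandlerBit = 2;
constexpr int kHandledHintBit = 3;

enum PromiseState { kPending = 0, kFulfilled = 1, kRejected = 2 };

int status(int flags) { return (flags >> kStatusShift) & kStatusMask; }

int set_status(int flags, PromiseState s) {
  return (flags & ~(kStatusMask << kStatusShift)) | (s << kStatusShift);
}

int main() {
  int flags = 0;                 // pending, no handler
  assert(status(flags) == kPending);
  flags |= 1 << kHasHandlerBit;  // has_handler = true
  flags = set_status(flags, kFulfilled);
  assert(status(flags) == kFulfilled);
  assert((flags & (1 << kHasHandlerBit)) != 0);  // bit survives the status write
  assert((flags & (1 << kHandledHintBit)) == 0);
}
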
diff --git a/deps/v8/src/objects/js-regexp.h b/deps/v8/src/objects/js-regexp.h
index 69cd5c3104..6a0c4e3391 100644
--- a/deps/v8/src/objects/js-regexp.h
+++ b/deps/v8/src/objects/js-regexp.h
@@ -48,7 +48,7 @@ class JSRegExp : public JSObject {
};
typedef base::Flags<Flag> Flags;
- static int FlagCount() { return 6; }
+ static constexpr int FlagCount() { return 6; }
DECL_ACCESSORS(data, Object)
DECL_ACCESSORS(flags, Object)
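
Marking FlagCount() constexpr lets callers use it where a compile-time constant is required, such as array bounds or static_asserts. A tiny standalone illustration of the difference (not V8 code):

// With constexpr, the call is usable in constant expressions.
static constexpr int FlagCount() { return 6; }

static_assert(FlagCount() == 6, "six regexp flags");

int flag_hits[FlagCount()] = {};  // array bound must be a constant expression

int main() { return flag_hits[0]; }
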
diff --git a/deps/v8/src/objects/literal-objects-inl.h b/deps/v8/src/objects/literal-objects-inl.h
index 34a427c67b..fa9fcedaab 100644
--- a/deps/v8/src/objects/literal-objects-inl.h
+++ b/deps/v8/src/objects/literal-objects-inl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LITERAL_OBJECTS_INL_H_
-#define V8_LITERAL_OBJECTS_INL_H_
+#ifndef V8_OBJECTS_LITERAL_OBJECTS_INL_H_
+#define V8_OBJECTS_LITERAL_OBJECTS_INL_H_
#include "src/objects-inl.h"
#include "src/objects/literal-objects.h"
@@ -48,4 +48,4 @@ ACCESSORS(ClassBoilerplate, instance_computed_properties, FixedArray,
#include "src/objects/object-macros-undef.h"
-#endif // V8_LITERAL_OBJECTS_INL_H_
+#endif // V8_OBJECTS_LITERAL_OBJECTS_INL_H_
diff --git a/deps/v8/src/objects/literal-objects.cc b/deps/v8/src/objects/literal-objects.cc
index 13f8b00878..ab673aad80 100644
--- a/deps/v8/src/objects/literal-objects.cc
+++ b/deps/v8/src/objects/literal-objects.cc
@@ -423,6 +423,10 @@ void ClassBoilerplate::AddToElementsTemplate(
Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
Isolate* isolate, ClassLiteral* expr) {
+ // Create a non-caching handle scope to ensure that the temporary handle used
+ // by ObjectDescriptor for passing Smis around does not corrupt handle cache
+ // in CanonicalHandleScope.
+ HandleScope scope(isolate);
Factory* factory = isolate->factory();
ObjectDescriptor static_desc;
ObjectDescriptor instance_desc;
@@ -509,11 +513,14 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
case ClassLiteral::Property::SETTER:
value_kind = ClassBoilerplate::kSetter;
break;
- case ClassLiteral::Property::FIELD:
+ case ClassLiteral::Property::PUBLIC_FIELD:
if (property->is_computed_name()) {
++dynamic_argument_index;
}
continue;
+ case ClassLiteral::Property::PRIVATE_FIELD:
+ DCHECK(!property->is_computed_name());
+ continue;
}
ObjectDescriptor& desc =
@@ -580,7 +587,7 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
class_boilerplate->set_instance_computed_properties(
*instance_desc.computed_properties());
- return class_boilerplate;
+ return scope.CloseAndEscape(class_boilerplate);
}
} // namespace internal
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index c78f947b3a..250a998f61 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -503,6 +503,7 @@ bool Map::IsJSObjectMap() const {
STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
return instance_type() >= FIRST_JS_OBJECT_TYPE;
}
+bool Map::IsJSPromiseMap() const { return instance_type() == JS_PROMISE_TYPE; }
bool Map::IsJSArrayMap() const { return instance_type() == JS_ARRAY_TYPE; }
bool Map::IsJSFunctionMap() const {
return instance_type() == JS_FUNCTION_TYPE;
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index bf0d843884..3bc9dd17ff 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -26,6 +26,7 @@ namespace internal {
V(CodeDataContainer) \
V(ConsString) \
V(DataObject) \
+ V(FeedbackCell) \
V(FeedbackVector) \
V(FixedArray) \
V(FixedDoubleArray) \
@@ -713,6 +714,7 @@ class Map : public HeapObject {
inline bool IsPrimitiveMap() const;
inline bool IsJSReceiverMap() const;
inline bool IsJSObjectMap() const;
+ inline bool IsJSPromiseMap() const;
inline bool IsJSArrayMap() const;
inline bool IsJSFunctionMap() const;
inline bool IsStringMap() const;
diff --git a/deps/v8/src/objects/microtask-inl.h b/deps/v8/src/objects/microtask-inl.h
new file mode 100644
index 0000000000..71a9ea20ec
--- /dev/null
+++ b/deps/v8/src/objects/microtask-inl.h
@@ -0,0 +1,31 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_MICROTASK_INL_H_
+#define V8_OBJECTS_MICROTASK_INL_H_
+
+#include "src/objects/microtask.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+CAST_ACCESSOR(Microtask)
+CAST_ACCESSOR(CallbackTask)
+CAST_ACCESSOR(CallableTask)
+
+ACCESSORS(CallableTask, callable, JSReceiver, kCallableOffset)
+ACCESSORS(CallableTask, context, Context, kContextOffset)
+
+ACCESSORS(CallbackTask, callback, Foreign, kCallbackOffset)
+ACCESSORS(CallbackTask, data, Foreign, kDataOffset)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_MICROTASK_INL_H_
diff --git a/deps/v8/src/objects/microtask.h b/deps/v8/src/objects/microtask.h
new file mode 100644
index 0000000000..33f121aa2c
--- /dev/null
+++ b/deps/v8/src/objects/microtask.h
@@ -0,0 +1,77 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_MICROTASK_H_
+#define V8_OBJECTS_MICROTASK_H_
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// Abstract base class for all microtasks that can be scheduled on the
+// microtask queue. This class merely serves the purpose of a marker
+// interface.
+class Microtask : public Struct {
+ public:
+ // Dispatched behavior.
+ DECL_CAST(Microtask)
+ DECL_VERIFIER(Microtask)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Microtask);
+};
+
+// A CallbackTask is a special Microtask that allows us to schedule
+// C++ microtask callbacks on the microtask queue. This is heavily
+// used by Blink for example.
+class CallbackTask : public Microtask {
+ public:
+ DECL_ACCESSORS(callback, Foreign)
+ DECL_ACCESSORS(data, Foreign)
+
+ static const int kCallbackOffset = Microtask::kHeaderSize;
+ static const int kDataOffset = kCallbackOffset + kPointerSize;
+ static const int kSize = kDataOffset + kPointerSize;
+
+ // Dispatched behavior.
+ DECL_CAST(CallbackTask)
+ DECL_PRINTER(CallbackTask)
+ DECL_VERIFIER(CallbackTask)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CallbackTask)
+};
+
+// A CallableTask is a special (internal) Microtask that allows us to
+// schedule arbitrary callables on the microtask queue. We use this
+// for various tests of the microtask queue.
+class CallableTask : public Microtask {
+ public:
+ DECL_ACCESSORS(callable, JSReceiver)
+ DECL_ACCESSORS(context, Context)
+
+ static const int kCallableOffset = Microtask::kHeaderSize;
+ static const int kContextOffset = kCallableOffset + kPointerSize;
+ static const int kSize = kContextOffset + kPointerSize;
+
+ // Dispatched behavior.
+ DECL_CAST(CallableTask)
+ DECL_PRINTER(CallableTask)
+ DECL_VERIFIER(CallableTask)
+ void BriefPrintDetails(std::ostream& os);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CallableTask);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_MICROTASK_H_
diff --git a/deps/v8/src/objects/module.h b/deps/v8/src/objects/module.h
index fe374d3fc6..9cf3bc4d2a 100644
--- a/deps/v8/src/objects/module.h
+++ b/deps/v8/src/objects/module.h
@@ -24,17 +24,16 @@ class ModuleInfoEntry;
class String;
class Zone;
-// A Module object is a mapping from export names to cells
-// This is still very much in flux.
+// The runtime representation of an ECMAScript module.
class Module : public Struct {
public:
DECL_CAST(Module)
DECL_VERIFIER(Module)
DECL_PRINTER(Module)
- // The code representing this Module, or an abstraction thereof.
- // This is either a SharedFunctionInfo or a JSFunction or a ModuleInfo
- // depending on whether the module has been instantiated and evaluated. See
+ // The code representing this module, or an abstraction thereof.
+ // This is either a SharedFunctionInfo, a JSFunction, a JSGeneratorObject, or
+ // a ModuleInfo, depending on the state (status) the module is in. See
// Module::ModuleVerify() for the precise invariant.
DECL_ACCESSORS(code, Object)
diff --git a/deps/v8/src/objects/name-inl.h b/deps/v8/src/objects/name-inl.h
index d59a3f54a3..b4ebeb632b 100644
--- a/deps/v8/src/objects/name-inl.h
+++ b/deps/v8/src/objects/name-inl.h
@@ -27,6 +27,20 @@ BOOL_ACCESSORS(Symbol, flags, is_interesting_symbol, kInterestingSymbolBit)
TYPE_CHECKER(Symbol, SYMBOL_TYPE)
+bool Symbol::is_private_field() const {
+ bool value = BooleanBit::get(flags(), kPrivateFieldBit);
+ DCHECK_IMPLIES(value, is_private());
+ return value;
+}
+
+void Symbol::set_is_private_field() {
+ int old_value = flags();
+ // TODO(gsathya): Re-order the bits to have these next to each other
+ // and just do the bit shifts once.
+ set_flags(BooleanBit::set(old_value, kPrivateBit, true) |
+ BooleanBit::set(old_value, kPrivateFieldBit, true));
+}
+
bool Name::IsUniqueName() const {
uint32_t type = map()->instance_type();
return (type & (kIsNotStringMask | kIsNotInternalizedMask)) !=
@@ -89,6 +103,13 @@ bool Name::IsPrivate() {
return this->IsSymbol() && Symbol::cast(this)->is_private();
}
+bool Name::IsPrivateField() {
+ bool is_private_field =
+ this->IsSymbol() && Symbol::cast(this)->is_private_field();
+ DCHECK_IMPLIES(is_private_field, IsPrivate());
+ return is_private_field;
+}
+
bool Name::AsArrayIndex(uint32_t* index) {
return IsString() && String::cast(this)->AsArrayIndex(index);
}
diff --git a/deps/v8/src/objects/name.h b/deps/v8/src/objects/name.h
index dd5b3692f9..e5cfe7733b 100644
--- a/deps/v8/src/objects/name.h
+++ b/deps/v8/src/objects/name.h
@@ -44,6 +44,10 @@ class Name : public HeapObject {
// If the name is private, it can only name own properties.
inline bool IsPrivate();
+ // If the name is a private field, it should behave like a private
+ // symbol but also throw on property access miss.
+ inline bool IsPrivateField();
+
inline bool IsUniqueName() const;
static inline bool ContainsCachedArrayIndex(uint32_t hash);
@@ -160,6 +164,14 @@ class Symbol : public Name {
// Symbol.keyFor on such a symbol simply needs to return the attached name.
DECL_BOOLEAN_ACCESSORS(is_public)
+ // [is_private_field]: Whether this is a private field. Private fields
+ // are the same as private symbols except they throw on missing
+ // property access.
+ //
+ // This also sets the is_private bit.
+ inline bool is_private_field() const;
+ inline void set_is_private_field();
+
DECL_CAST(Symbol)
// Dispatched behavior.
@@ -176,6 +188,7 @@ class Symbol : public Name {
static const int kWellKnownSymbolBit = 1;
static const int kPublicBit = 2;
static const int kInterestingSymbolBit = 3;
+ static const int kPrivateFieldBit = 4;
typedef FixedBodyDescriptor<kNameOffset, kFlagsOffset, kSize> BodyDescriptor;
// No weak fields.
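
set_is_private_field() writes two bits at once, the existing private bit and the new kPrivateFieldBit, so every private-field symbol is also a private symbol. A standalone sketch of that BooleanBit-style update (illustrative; kPrivateFieldBit comes from the hunk above, while kPrivateBit is assumed to be bit 0 since it is defined just outside this hunk):

#include <cassert>

constexpr int kPrivateBit = 0;       // assumed, not shown in this hunk
constexpr int kPrivateFieldBit = 4;  // from the diff above

// BooleanBit-style helpers: one bit per boolean flag inside an int.
constexpr bool Get(int flags, int bit) { return (flags >> bit) & 1; }
constexpr int Set(int flags, int bit, bool value) {
  return value ? (flags | (1 << bit)) : (flags & ~(1 << bit));
}

int main() {
  int flags = 0;
  // set_is_private_field(): both bits are computed from the same old value
  // and the results OR'd together, as in the diff.
  int old_value = flags;
  flags = Set(old_value, kPrivateBit, true) |
          Set(old_value, kPrivateFieldBit, true);
  assert(Get(flags, kPrivateBit));       // is_private()
  assert(Get(flags, kPrivateFieldBit));  // is_private_field()
}
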
diff --git a/deps/v8/src/objects/object-macros-undef.h b/deps/v8/src/objects/object-macros-undef.h
index f81dc29504..d8ca9355ad 100644
--- a/deps/v8/src/objects/object-macros-undef.h
+++ b/deps/v8/src/objects/object-macros-undef.h
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// PRESUBMIT_INTENTIONALLY_MISSING_INCLUDE_GUARD
+
#undef DECL_PRIMITIVE_ACCESSORS
#undef DECL_BOOLEAN_ACCESSORS
#undef DECL_INT_ACCESSORS
diff --git a/deps/v8/src/objects/object-macros.h b/deps/v8/src/objects/object-macros.h
index 604942a272..52835bce9b 100644
--- a/deps/v8/src/objects/object-macros.h
+++ b/deps/v8/src/objects/object-macros.h
@@ -7,6 +7,8 @@
// Note 2: This file is deliberately missing the include guards (the undeffing
// approach wouldn't work otherwise).
+//
+// PRESUBMIT_INTENTIONALLY_MISSING_INCLUDE_GUARD
// The accessors with RELAXED_, ACQUIRE_, and RELEASE_ prefixes should be used
// for fields that can be written to and read from multiple threads at the same
diff --git a/deps/v8/src/objects/promise-inl.h b/deps/v8/src/objects/promise-inl.h
new file mode 100644
index 0000000000..4283f0aa19
--- /dev/null
+++ b/deps/v8/src/objects/promise-inl.h
@@ -0,0 +1,48 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_PROMISE_INL_H_
+#define V8_OBJECTS_PROMISE_INL_H_
+
+#include "src/objects/promise.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+CAST_ACCESSOR(PromiseCapability)
+CAST_ACCESSOR(PromiseReaction)
+CAST_ACCESSOR(PromiseReactionJobTask)
+CAST_ACCESSOR(PromiseFulfillReactionJobTask)
+CAST_ACCESSOR(PromiseRejectReactionJobTask)
+CAST_ACCESSOR(PromiseResolveThenableJobTask)
+
+ACCESSORS(PromiseReaction, next, Object, kNextOffset)
+ACCESSORS(PromiseReaction, reject_handler, HeapObject, kRejectHandlerOffset)
+ACCESSORS(PromiseReaction, fulfill_handler, HeapObject, kFulfillHandlerOffset)
+ACCESSORS(PromiseReaction, payload, HeapObject, kPayloadOffset)
+
+ACCESSORS(PromiseResolveThenableJobTask, context, Context, kContextOffset)
+ACCESSORS(PromiseResolveThenableJobTask, promise_to_resolve, JSPromise,
+ kPromiseToResolveOffset)
+ACCESSORS(PromiseResolveThenableJobTask, then, JSReceiver, kThenOffset)
+ACCESSORS(PromiseResolveThenableJobTask, thenable, JSReceiver, kThenableOffset)
+
+ACCESSORS(PromiseReactionJobTask, context, Context, kContextOffset)
+ACCESSORS(PromiseReactionJobTask, argument, Object, kArgumentOffset);
+ACCESSORS(PromiseReactionJobTask, handler, HeapObject, kHandlerOffset);
+ACCESSORS(PromiseReactionJobTask, payload, HeapObject, kPayloadOffset);
+
+ACCESSORS(PromiseCapability, promise, HeapObject, kPromiseOffset)
+ACCESSORS(PromiseCapability, resolve, Object, kResolveOffset)
+ACCESSORS(PromiseCapability, reject, Object, kRejectOffset)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_PROMISE_INL_H_
diff --git a/deps/v8/src/objects/promise.h b/deps/v8/src/objects/promise.h
new file mode 100644
index 0000000000..36ef4afe1d
--- /dev/null
+++ b/deps/v8/src/objects/promise.h
@@ -0,0 +1,168 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_PROMISE_H_
+#define V8_OBJECTS_PROMISE_H_
+
+#include "src/objects/microtask.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// Struct to hold state required for PromiseReactionJob. See the comment on the
+// PromiseReaction below for details on how this is being managed to reduce the
+// memory and allocation overhead. This is the base class for the concrete
+//
+// - PromiseFulfillReactionJobTask
+// - PromiseRejectReactionJobTask
+//
+// classes, which are used to represent either reactions, and we distinguish
+// them by their instance types.
+class PromiseReactionJobTask : public Microtask {
+ public:
+ DECL_ACCESSORS(argument, Object)
+ DECL_ACCESSORS(context, Context)
+ // [handler]: This is either a Code object, a Callable or Undefined.
+ DECL_ACCESSORS(handler, HeapObject)
+ // [payload]: Usually a JSPromise or a PromiseCapability.
+ DECL_ACCESSORS(payload, HeapObject)
+
+ static const int kArgumentOffset = Microtask::kHeaderSize;
+ static const int kContextOffset = kArgumentOffset + kPointerSize;
+ static const int kHandlerOffset = kContextOffset + kPointerSize;
+ static const int kPayloadOffset = kHandlerOffset + kPointerSize;
+ static const int kSize = kPayloadOffset + kPointerSize;
+
+ // Dispatched behavior.
+ DECL_CAST(PromiseReactionJobTask)
+ DECL_VERIFIER(PromiseReactionJobTask)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseReactionJobTask);
+};
+
+// Struct to hold state required for a PromiseReactionJob of type "Fulfill".
+class PromiseFulfillReactionJobTask : public PromiseReactionJobTask {
+ public:
+ // Dispatched behavior.
+ DECL_CAST(PromiseFulfillReactionJobTask)
+ DECL_PRINTER(PromiseFulfillReactionJobTask)
+ DECL_VERIFIER(PromiseFulfillReactionJobTask)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseFulfillReactionJobTask);
+};
+
+// Struct to hold state required for a PromiseReactionJob of type "Reject".
+class PromiseRejectReactionJobTask : public PromiseReactionJobTask {
+ public:
+ // Dispatched behavior.
+ DECL_CAST(PromiseRejectReactionJobTask)
+ DECL_PRINTER(PromiseRejectReactionJobTask)
+ DECL_VERIFIER(PromiseRejectReactionJobTask)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseRejectReactionJobTask);
+};
+
+// A container struct to hold state required for PromiseResolveThenableJob.
+class PromiseResolveThenableJobTask : public Microtask {
+ public:
+ DECL_ACCESSORS(context, Context)
+ DECL_ACCESSORS(promise_to_resolve, JSPromise)
+ DECL_ACCESSORS(then, JSReceiver)
+ DECL_ACCESSORS(thenable, JSReceiver)
+
+ static const int kContextOffset = Microtask::kHeaderSize;
+ static const int kPromiseToResolveOffset = kContextOffset + kPointerSize;
+ static const int kThenOffset = kPromiseToResolveOffset + kPointerSize;
+ static const int kThenableOffset = kThenOffset + kPointerSize;
+ static const int kSize = kThenableOffset + kPointerSize;
+
+ // Dispatched behavior.
+ DECL_CAST(PromiseResolveThenableJobTask)
+ DECL_PRINTER(PromiseResolveThenableJobTask)
+ DECL_VERIFIER(PromiseResolveThenableJobTask)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseResolveThenableJobTask);
+};
+
+// Struct to hold the state of a PromiseCapability.
+class PromiseCapability : public Struct {
+ public:
+ DECL_ACCESSORS(promise, HeapObject)
+ DECL_ACCESSORS(resolve, Object)
+ DECL_ACCESSORS(reject, Object)
+
+ static const int kPromiseOffset = Struct::kHeaderSize;
+ static const int kResolveOffset = kPromiseOffset + kPointerSize;
+ static const int kRejectOffset = kResolveOffset + kPointerSize;
+ static const int kSize = kRejectOffset + kPointerSize;
+
+ // Dispatched behavior.
+ DECL_CAST(PromiseCapability)
+ DECL_PRINTER(PromiseCapability)
+ DECL_VERIFIER(PromiseCapability)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseCapability);
+};
+
+// A representation of a promise reaction. This differs from the specification
+// in that the PromiseReaction here holds both handlers for the fulfill and
+// the reject case. When a JSPromise is eventually resolved (either via
+// fulfilling it or rejecting it), we morph this PromiseReaction object in
+// memory into a proper PromiseReactionJobTask and schedule it on the queue
+// of microtasks. So the size of PromiseReaction and the size of the
+// PromiseReactionJobTask have to be the same for this to work.
+//
+// The PromiseReaction::payload field usually holds a JSPromise
+// instance (in the fast case of a native promise) or a PromiseCapability
+// in case of a custom promise. For await we store the JSGeneratorObject
+// here and use custom Code handlers.
+//
+// We need to keep the context in the PromiseReaction so that we can run
+// the default handlers (in case they are undefined) in the proper context.
+//
+// The PromiseReaction objects form a singly-linked list, terminated by
+// Smi 0. On the JSPromise instance they are linked in reverse order,
+// and are turned into the proper order again when scheduling them on
+// the microtask queue.
+class PromiseReaction : public Struct {
+ public:
+ enum Type { kFulfill, kReject };
+
+ DECL_ACCESSORS(next, Object)
+ // [reject_handler]: This is either a Code object, a Callable or Undefined.
+ DECL_ACCESSORS(reject_handler, HeapObject)
+ // [fulfill_handler]: This is either a Code object, a Callable or Undefined.
+ DECL_ACCESSORS(fulfill_handler, HeapObject)
+ // [payload]: Usually a JSPromise or a PromiseCapability.
+ DECL_ACCESSORS(payload, HeapObject)
+
+ static const int kNextOffset = Struct::kHeaderSize;
+ static const int kRejectHandlerOffset = kNextOffset + kPointerSize;
+ static const int kFulfillHandlerOffset = kRejectHandlerOffset + kPointerSize;
+ static const int kPayloadOffset = kFulfillHandlerOffset + kPointerSize;
+ static const int kSize = kPayloadOffset + kPointerSize;
+
+ // Dispatched behavior.
+ DECL_CAST(PromiseReaction)
+ DECL_PRINTER(PromiseReaction)
+ DECL_VERIFIER(PromiseReaction)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseReaction);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_PROMISE_H_
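
The comment above PromiseReaction describes a specific list discipline: reactions are prepended while the promise is pending (so the list sits in reverse registration order) and the chain is reversed once when the promise settles, at which point each node is morphed into a PromiseReactionJobTask. Below is a minimal standalone sketch of that discipline, using invented Toy* types rather than V8's actual objects (which terminate the list with Smi 0 and reuse the same memory for the job task).

// Sketch of the reverse-order reaction list and its one-time reversal.
// ToyReaction, Prepend and ReverseForScheduling are illustrative only.
#include <cassert>
#include <string>

struct ToyReaction {
  std::string handler_name;     // stands in for the fulfill/reject handler
  ToyReaction* next = nullptr;  // V8 terminates the list with Smi 0
};

// Prepend: O(1) per .then() while the promise is pending.
ToyReaction* Prepend(ToyReaction* head, ToyReaction* reaction) {
  reaction->next = head;
  return reaction;
}

// On settlement: reverse in place, mirroring the step where V8 turns each
// PromiseReaction into a PromiseReactionJobTask on the microtask queue.
ToyReaction* ReverseForScheduling(ToyReaction* head) {
  ToyReaction* reversed = nullptr;
  while (head != nullptr) {
    ToyReaction* next = head->next;
    head->next = reversed;
    reversed = head;
    head = next;
  }
  return reversed;
}

int main() {
  ToyReaction a{"first"}, b{"second"}, c{"third"};
  ToyReaction* head = nullptr;
  head = Prepend(head, &a);
  head = Prepend(head, &b);
  head = Prepend(head, &c);           // pending order: third -> second -> first
  head = ReverseForScheduling(head);  // scheduling order: first -> second -> third
  assert(head->handler_name == "first");
  assert(head->next->next->handler_name == "third");
  return 0;
}

Prepending keeps every .then() registration constant-time; the single reversal on settlement restores registration order before the jobs are enqueued.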
diff --git a/deps/v8/src/objects/scope-info.cc b/deps/v8/src/objects/scope-info.cc
index 82f835ff0c..d199d7f6ec 100644
--- a/deps/v8/src/objects/scope-info.cc
+++ b/deps/v8/src/objects/scope-info.cc
@@ -781,47 +781,6 @@ void ScopeInfo::ModuleVariable(int i, String** name, int* index,
}
}
-#ifdef DEBUG
-
-static void PrintList(const char* list_name, int nof_internal_slots, int start,
- int end, ScopeInfo* scope_info) {
- if (start < end) {
- PrintF("\n // %s\n", list_name);
- if (nof_internal_slots > 0) {
- PrintF(" %2d - %2d [internal slots]\n", 0, nof_internal_slots - 1);
- }
- for (int i = nof_internal_slots; start < end; ++i, ++start) {
- PrintF(" %2d ", i);
- String::cast(scope_info->get(start))->ShortPrint();
- PrintF("\n");
- }
- }
-}
-
-void ScopeInfo::Print() {
- PrintF("ScopeInfo ");
- if (HasFunctionName()) {
- FunctionName()->ShortPrint();
- } else {
- PrintF("/* no function name */");
- }
- PrintF("{");
-
- if (length() > 0) {
- PrintList("parameters", 0, ParameterNamesIndex(),
- ParameterNamesIndex() + ParameterCount(), this);
- PrintList("stack slots", 0, StackLocalNamesIndex(),
- StackLocalNamesIndex() + StackLocalCount(), this);
- PrintList("context slots", Context::MIN_CONTEXT_SLOTS,
- ContextLocalNamesIndex(),
- ContextLocalNamesIndex() + ContextLocalCount(), this);
- // TODO(neis): Print module stuff if present.
- }
-
- PrintF("}\n");
-}
-#endif // DEBUG
-
Handle<ModuleInfoEntry> ModuleInfoEntry::New(Isolate* isolate,
Handle<Object> export_name,
Handle<Object> local_name,
diff --git a/deps/v8/src/objects/scope-info.h b/deps/v8/src/objects/scope-info.h
index 3a8459a204..0532686ba0 100644
--- a/deps/v8/src/objects/scope-info.h
+++ b/deps/v8/src/objects/scope-info.h
@@ -35,6 +35,7 @@ class Zone;
class ScopeInfo : public FixedArray {
public:
DECL_CAST(ScopeInfo)
+ DECL_PRINTER(ScopeInfo)
// Return the type of this scope.
ScopeType scope_type();
@@ -187,10 +188,6 @@ class ScopeInfo : public FixedArray {
// Serializes empty scope info.
V8_EXPORT_PRIVATE static ScopeInfo* Empty(Isolate* isolate);
-#ifdef DEBUG
- void Print();
-#endif
-
// The layout of the static part of a ScopeInfo is as follows. Each entry is
// numeric and occupies one array slot.
// 1. A set of properties of the scope.
@@ -307,7 +304,7 @@ class ScopeInfo : public FixedArray {
class HasSimpleParametersField
: public BitField<bool, AsmModuleField::kNext, 1> {};
class FunctionKindField
- : public BitField<FunctionKind, HasSimpleParametersField::kNext, 11> {};
+ : public BitField<FunctionKind, HasSimpleParametersField::kNext, 5> {};
class HasOuterScopeInfoField
: public BitField<bool, FunctionKindField::kNext, 1> {};
class IsDebugEvaluateScopeField
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index 57a72754b5..2f3b32f17c 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -27,8 +27,6 @@ ACCESSORS(SharedFunctionInfo, raw_name, Object, kNameOffset)
ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
ACCESSORS(SharedFunctionInfo, feedback_metadata, FeedbackMetadata,
kFeedbackMetadataOffset)
-ACCESSORS(SharedFunctionInfo, instance_class_name, String,
- kInstanceClassNameOffset)
ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
ACCESSORS(SharedFunctionInfo, script, Object, kScriptOffset)
ACCESSORS(SharedFunctionInfo, debug_info, Object, kDebugInfoOffset)
@@ -127,9 +125,10 @@ FunctionKind SharedFunctionInfo::kind() const {
}
void SharedFunctionInfo::set_kind(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
int hints = compiler_hints();
hints = FunctionKindBits::update(hints, kind);
+ hints = IsClassConstructorBit::update(hints, IsClassConstructor(kind));
+ hints = IsDerivedConstructorBit::update(hints, IsDerivedConstructor(kind));
set_compiler_hints(hints);
UpdateFunctionMapIndex();
}
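
This hunk makes set_kind also cache two predicates as dedicated bits, which is why FunctionKindBits can shrink to 5 bits later in the diff and why the old kClassConstructorMask/kDerivedConstructorMask shift tricks are dropped. The following is a simplified, self-contained sketch of that idea; ToyBitField and the toy enum values are invented, and V8's real BitField and FunctionKind layout differ.

// Sketch: pack a 5-bit kind plus two cached predicate bits into one word.
#include <cassert>
#include <cstdint>

template <typename T, int kShift, int kBits>
struct ToyBitField {
  static constexpr uint32_t kMask = ((1u << kBits) - 1u) << kShift;
  static constexpr uint32_t update(uint32_t hints, T value) {
    return (hints & ~kMask) |
           ((static_cast<uint32_t>(value) << kShift) & kMask);
  }
  static constexpr T decode(uint32_t hints) {
    return static_cast<T>((hints & kMask) >> kShift);
  }
};

enum class ToyFunctionKind : uint32_t {
  kNormalFunction = 0,
  kBaseConstructor = 1,
  kDerivedConstructor = 2,  // toy values, not V8's real FunctionKind encoding
};

using FunctionKindBits = ToyBitField<ToyFunctionKind, 0, 5>;
using IsClassConstructorBit = ToyBitField<bool, 5, 1>;
using IsDerivedConstructorBit = ToyBitField<bool, 6, 1>;

constexpr bool IsClassConstructor(ToyFunctionKind k) {
  return k == ToyFunctionKind::kBaseConstructor ||
         k == ToyFunctionKind::kDerivedConstructor;
}
constexpr bool IsDerivedConstructor(ToyFunctionKind k) {
  return k == ToyFunctionKind::kDerivedConstructor;
}

// Mirrors the shape of the new set_kind: write the kind, then cache the
// derived predicates so readers never have to decode the kind field.
uint32_t SetKind(uint32_t hints, ToyFunctionKind kind) {
  hints = FunctionKindBits::update(hints, kind);
  hints = IsClassConstructorBit::update(hints, IsClassConstructor(kind));
  hints = IsDerivedConstructorBit::update(hints, IsDerivedConstructor(kind));
  return hints;
}

int main() {
  uint32_t hints = SetKind(0, ToyFunctionKind::kDerivedConstructor);
  assert(IsClassConstructorBit::decode(hints));
  assert(IsDerivedConstructorBit::decode(hints));
  assert(FunctionKindBits::decode(hints) == ToyFunctionKind::kDerivedConstructor);
  return 0;
}

Callers that only care about the constructor predicates can now test a single bit instead of decoding and comparing the 5-bit kind field.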
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index 8e996042c0..077088dd28 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -136,9 +136,6 @@ class SharedFunctionInfo : public HeapObject {
DECL_INT_ACCESSORS(unique_id)
#endif
- // [instance class name]: class name for instances.
- DECL_ACCESSORS(instance_class_name, String)
-
// [function data]: This field holds some additional data for function.
// Currently it has one of:
// - a FunctionTemplateInfo to make benefit the API [IsApiFunction()].
@@ -342,7 +339,11 @@ class SharedFunctionInfo : public HeapObject {
static Handle<Object> GetSourceCode(Handle<SharedFunctionInfo> shared);
static Handle<Object> GetSourceCodeHarmony(Handle<SharedFunctionInfo> shared);
- // Tells whether this function should be subject to debugging.
+ // Tells whether this function should be subject to debugging, e.g. for
+ // - scope inspection
+ // - internal break points
+ // - coverage and type profile
+ // - error stack trace
inline bool IsSubjectToDebugging();
// Whether this function is defined in user-provided JavaScript code.
@@ -424,7 +425,6 @@ class SharedFunctionInfo : public HeapObject {
V(kScopeInfoOffset, kPointerSize) \
V(kOuterScopeInfoOffset, kPointerSize) \
V(kConstructStubOffset, kPointerSize) \
- V(kInstanceClassNameOffset, kPointerSize) \
V(kFunctionDataOffset, kPointerSize) \
V(kScriptOffset, kPointerSize) \
V(kDebugInfoOffset, kPointerSize) \
@@ -469,7 +469,9 @@ class SharedFunctionInfo : public HeapObject {
V(IsNativeBit, bool, 1, _) \
V(IsStrictBit, bool, 1, _) \
V(IsWrappedBit, bool, 1, _) \
- V(FunctionKindBits, FunctionKind, 11, _) \
+ V(IsClassConstructorBit, bool, 1, _) \
+ V(IsDerivedConstructorBit, bool, 1, _) \
+ V(FunctionKindBits, FunctionKind, 5, _) \
V(HasDuplicateParametersBit, bool, 1, _) \
V(AllowLazyCompilationBit, bool, 1, _) \
V(NeedsHomeObjectBit, bool, 1, _) \
@@ -487,12 +489,6 @@ class SharedFunctionInfo : public HeapObject {
DisabledOptimizationReasonBits::kMax);
STATIC_ASSERT(kLastFunctionKind <= FunctionKindBits::kMax);
- // Masks for checking if certain FunctionKind bits are set without fully
- // decoding of the FunctionKind bit field.
- static const int kClassConstructorMask = FunctionKind::kClassConstructor
- << FunctionKindBits::kShift;
- static const int kDerivedConstructorMask = FunctionKind::kDerivedConstructor
- << FunctionKindBits::kShift;
// Bit positions in |debugger_hints|.
#define DEBUGGER_HINTS_BIT_FIELDS(V, _) \
diff --git a/deps/v8/src/objects/string.h b/deps/v8/src/objects/string.h
index 066fc6d879..dee56fb7f7 100644
--- a/deps/v8/src/objects/string.h
+++ b/deps/v8/src/objects/string.h
@@ -314,7 +314,7 @@ class String : public Name {
uint32_t inline ToValidIndex(Object* number);
// Trimming.
- enum TrimMode { kTrim, kTrimLeft, kTrimRight };
+ enum TrimMode { kTrim, kTrimStart, kTrimEnd };
static Handle<String> Trim(Handle<String> string, TrimMode mode);
DECL_CAST(String)
diff --git a/deps/v8/src/objects/template-objects.cc b/deps/v8/src/objects/template-objects.cc
index 24f306aa68..e35f3f137b 100644
--- a/deps/v8/src/objects/template-objects.cc
+++ b/deps/v8/src/objects/template-objects.cc
@@ -12,117 +12,41 @@
namespace v8 {
namespace internal {
-bool TemplateObjectDescription::Equals(
- TemplateObjectDescription const* that) const {
- if (this->raw_strings()->length() == that->raw_strings()->length()) {
- for (int i = this->raw_strings()->length(); --i >= 0;) {
- if (this->raw_strings()->get(i) != that->raw_strings()->get(i)) {
- return false;
- }
- }
- return true;
- }
- return false;
-}
-
// static
-Handle<JSArray> TemplateObjectDescription::GetTemplateObject(
- Handle<TemplateObjectDescription> description,
- Handle<Context> native_context) {
- DCHECK(native_context->IsNativeContext());
- Isolate* const isolate = native_context->GetIsolate();
-
- // Check if we already have a [[TemplateMap]] for the {native_context},
- // and if not, just allocate one on the fly (which will be set below).
- Handle<TemplateMap> template_map =
- native_context->template_map()->IsUndefined(isolate)
- ? TemplateMap::New(isolate)
- : handle(TemplateMap::cast(native_context->template_map()), isolate);
-
- // Check if we already have an appropriate entry.
- Handle<JSArray> template_object;
- if (!TemplateMap::Lookup(template_map, description)
- .ToHandle(&template_object)) {
- // Create the raw object from the {raw_strings}.
- Handle<FixedArray> raw_strings(description->raw_strings(), isolate);
- Handle<JSArray> raw_object = isolate->factory()->NewJSArrayWithElements(
- raw_strings, PACKED_ELEMENTS, raw_strings->length(), TENURED);
-
- // Create the template object from the {cooked_strings}.
- Handle<FixedArray> cooked_strings(description->cooked_strings(), isolate);
- template_object = isolate->factory()->NewJSArrayWithElements(
- cooked_strings, PACKED_ELEMENTS, cooked_strings->length(), TENURED);
-
- // Freeze the {raw_object}.
- JSObject::SetIntegrityLevel(raw_object, FROZEN, kThrowOnError).ToChecked();
-
- // Install a "raw" data property for {raw_object} on {template_object}.
- PropertyDescriptor raw_desc;
- raw_desc.set_value(raw_object);
- raw_desc.set_configurable(false);
- raw_desc.set_enumerable(false);
- raw_desc.set_writable(false);
- JSArray::DefineOwnProperty(isolate, template_object,
- isolate->factory()->raw_string(), &raw_desc,
- kThrowOnError)
- .ToChecked();
-
- // Freeze the {template_object} as well.
- JSObject::SetIntegrityLevel(template_object, FROZEN, kThrowOnError)
- .ToChecked();
-
- // Remember the {template_object} in the {template_map}.
- template_map = TemplateMap::Add(template_map, description, template_object);
- native_context->set_template_map(*template_map);
- }
+Handle<JSArray> TemplateObjectDescription::CreateTemplateObject(
+ Handle<TemplateObjectDescription> description) {
+ Isolate* const isolate = description->GetIsolate();
+
+ // Create the raw object from the {raw_strings}.
+ Handle<FixedArray> raw_strings(description->raw_strings(), isolate);
+ Handle<JSArray> raw_object = isolate->factory()->NewJSArrayWithElements(
+ raw_strings, PACKED_ELEMENTS, raw_strings->length(), TENURED);
+
+ // Create the template object from the {cooked_strings}.
+ Handle<FixedArray> cooked_strings(description->cooked_strings(), isolate);
+ Handle<JSArray> template_object = isolate->factory()->NewJSArrayWithElements(
+ cooked_strings, PACKED_ELEMENTS, cooked_strings->length(), TENURED);
+
+ // Freeze the {raw_object}.
+ JSObject::SetIntegrityLevel(raw_object, FROZEN, kThrowOnError).ToChecked();
+
+ // Install a "raw" data property for {raw_object} on {template_object}.
+ PropertyDescriptor raw_desc;
+ raw_desc.set_value(raw_object);
+ raw_desc.set_configurable(false);
+ raw_desc.set_enumerable(false);
+ raw_desc.set_writable(false);
+ JSArray::DefineOwnProperty(isolate, template_object,
+ isolate->factory()->raw_string(), &raw_desc,
+ kThrowOnError)
+ .ToChecked();
+
+ // Freeze the {template_object} as well.
+ JSObject::SetIntegrityLevel(template_object, FROZEN, kThrowOnError)
+ .ToChecked();
return template_object;
}
-// static
-bool TemplateMapShape::IsMatch(TemplateObjectDescription* key, Object* value) {
- return key->Equals(TemplateObjectDescription::cast(value));
-}
-
-// static
-uint32_t TemplateMapShape::Hash(Isolate* isolate,
- TemplateObjectDescription* key) {
- return key->hash();
-}
-
-// static
-uint32_t TemplateMapShape::HashForObject(Isolate* isolate, Object* object) {
- return Hash(isolate, TemplateObjectDescription::cast(object));
-}
-
-// static
-Handle<TemplateMap> TemplateMap::New(Isolate* isolate) {
- return HashTable::New(isolate, 0);
-}
-
-// static
-MaybeHandle<JSArray> TemplateMap::Lookup(
- Handle<TemplateMap> template_map, Handle<TemplateObjectDescription> key) {
- int const entry = template_map->FindEntry(*key);
- if (entry == kNotFound) return MaybeHandle<JSArray>();
- int const index = EntryToIndex(entry);
- return handle(JSArray::cast(template_map->get(index + 1)));
-}
-
-// static
-Handle<TemplateMap> TemplateMap::Add(Handle<TemplateMap> template_map,
- Handle<TemplateObjectDescription> key,
- Handle<JSArray> value) {
- DCHECK_EQ(kNotFound, template_map->FindEntry(*key));
- template_map = EnsureCapacity(template_map, 1);
- uint32_t const hash = ShapeT::Hash(key->GetIsolate(), *key);
- int const entry = template_map->FindInsertionEntry(hash);
- int const index = EntryToIndex(entry);
- template_map->set(index + 0, *key);
- template_map->set(index + 1, *value);
- template_map->ElementAdded();
- return template_map;
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/template-objects.h b/deps/v8/src/objects/template-objects.h
index cac29a3530..6c1a99831a 100644
--- a/deps/v8/src/objects/template-objects.h
+++ b/deps/v8/src/objects/template-objects.h
@@ -16,61 +16,25 @@ namespace internal {
// TemplateObjectDescription is a triple of hash, raw strings and cooked
// strings for tagged template literals. Used to communicate with the runtime
-// for template object creation within the {Runtime_GetTemplateObject} method.
-class TemplateObjectDescription final : public Tuple3 {
+// for template object creation within the {Runtime_CreateTemplateObject}
+// method.
+class TemplateObjectDescription final : public Tuple2 {
public:
- DECL_INT_ACCESSORS(hash)
DECL_ACCESSORS(raw_strings, FixedArray)
DECL_ACCESSORS(cooked_strings, FixedArray)
- bool Equals(TemplateObjectDescription const* that) const;
-
- static Handle<JSArray> GetTemplateObject(
- Handle<TemplateObjectDescription> description,
- Handle<Context> native_context);
+ static Handle<JSArray> CreateTemplateObject(
+ Handle<TemplateObjectDescription> description);
DECL_CAST(TemplateObjectDescription)
- static constexpr int kHashOffset = kValue1Offset;
- static constexpr int kRawStringsOffset = kValue2Offset;
- static constexpr int kCookedStringsOffset = kValue3Offset;
+ static constexpr int kRawStringsOffset = kValue1Offset;
+ static constexpr int kCookedStringsOffset = kValue2Offset;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateObjectDescription);
};
-class TemplateMapShape final : public BaseShape<TemplateObjectDescription*> {
- public:
- static bool IsMatch(TemplateObjectDescription* key, Object* value);
- static uint32_t Hash(Isolate* isolate, TemplateObjectDescription* key);
- static uint32_t HashForObject(Isolate* isolate, Object* object);
-
- static constexpr int kPrefixSize = 0;
- static constexpr int kEntrySize = 2;
-};
-
-class TemplateMap final : public HashTable<TemplateMap, TemplateMapShape> {
- public:
- static Handle<TemplateMap> New(Isolate* isolate);
-
- // Tries to lookup the given {key} in the {template_map}. Returns the
- // value if it's found, otherwise returns an empty MaybeHandle.
- WARN_UNUSED_RESULT static MaybeHandle<JSArray> Lookup(
- Handle<TemplateMap> template_map, Handle<TemplateObjectDescription> key);
-
- // Adds the {key} / {value} pair to the {template_map} and returns the
- // new TemplateMap (we might need to re-allocate). This assumes that
- // there's no entry for {key} in the {template_map} already.
- static Handle<TemplateMap> Add(Handle<TemplateMap> template_map,
- Handle<TemplateObjectDescription> key,
- Handle<JSArray> value);
-
- DECL_CAST(TemplateMap)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateMap);
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/parsing/OWNERS b/deps/v8/src/parsing/OWNERS
index fbbdd8b715..24218df199 100644
--- a/deps/v8/src/parsing/OWNERS
+++ b/deps/v8/src/parsing/OWNERS
@@ -5,7 +5,6 @@ gsathya@chromium.org
littledan@chromium.org
marja@chromium.org
neis@chromium.org
-rossberg@chromium.org
verwaest@chromium.org
# COMPONENT: Blink>JavaScript>Parser
diff --git a/deps/v8/src/parsing/background-parsing-task.cc b/deps/v8/src/parsing/background-parsing-task.cc
deleted file mode 100644
index cb811566df..0000000000
--- a/deps/v8/src/parsing/background-parsing-task.cc
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/parsing/background-parsing-task.h"
-
-#include "src/counters.h"
-#include "src/objects-inl.h"
-#include "src/parsing/parser.h"
-#include "src/parsing/scanner-character-streams.h"
-#include "src/vm-state-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void StreamedSource::Release() {
- parser.reset();
- info.reset();
-}
-
-BackgroundParsingTask::BackgroundParsingTask(
- StreamedSource* source, ScriptCompiler::CompileOptions options,
- int stack_size, Isolate* isolate)
- : source_(source),
- stack_size_(stack_size),
- script_data_(nullptr),
- timer_(isolate->counters()->compile_script_on_background()) {
- // We don't set the context to the CompilationInfo yet, because the background
- // thread cannot do anything with it anyway. We set it just before compilation
- // on the foreground thread.
- DCHECK(options == ScriptCompiler::kProduceParserCache ||
- options == ScriptCompiler::kProduceCodeCache ||
- options == ScriptCompiler::kProduceFullCodeCache ||
- options == ScriptCompiler::kNoCompileOptions ||
- options == ScriptCompiler::kEagerCompile);
-
- VMState<PARSER> state(isolate);
-
- // Prepare the data for the internalization phase and compilation phase, which
- // will happen in the main thread after parsing.
- ParseInfo* info = new ParseInfo(isolate->allocator());
- info->InitFromIsolate(isolate);
- if (V8_UNLIKELY(FLAG_runtime_stats)) {
- info->set_runtime_call_stats(new (info->zone()) RuntimeCallStats());
- } else {
- info->set_runtime_call_stats(nullptr);
- }
- info->set_toplevel();
- std::unique_ptr<Utf16CharacterStream> stream(
- ScannerStream::For(source->source_stream.get(), source->encoding,
- info->runtime_call_stats()));
- info->set_character_stream(std::move(stream));
- info->set_unicode_cache(&source_->unicode_cache);
- info->set_compile_options(options);
- info->set_allow_lazy_parsing();
- if (V8_UNLIKELY(info->block_coverage_enabled())) {
- info->AllocateSourceRangeMap();
- }
- info->set_cached_data(&script_data_);
- LanguageMode language_mode = construct_language_mode(FLAG_use_strict);
- info->set_language_mode(
- stricter_language_mode(info->language_mode(), language_mode));
-
- source->info.reset(info);
- allocator_ = isolate->allocator();
-
- // Parser needs to stay alive for finalizing the parsing on the main
- // thread.
- source_->parser.reset(new Parser(source_->info.get()));
- source_->parser->DeserializeScopeChain(source_->info.get(),
- MaybeHandle<ScopeInfo>());
-}
-
-void BackgroundParsingTask::Run() {
- TimedHistogramScope timer(timer_);
- DisallowHeapAllocation no_allocation;
- DisallowHandleAllocation no_handles;
- DisallowHandleDereference no_deref;
-
- source_->info->set_on_background_thread(true);
-
- // Reset the stack limit of the parser to reflect correctly that we're on a
- // background thread.
- uintptr_t old_stack_limit = source_->info->stack_limit();
- uintptr_t stack_limit = GetCurrentStackPosition() - stack_size_ * KB;
- source_->info->set_stack_limit(stack_limit);
- source_->parser->set_stack_limit(stack_limit);
-
- source_->parser->ParseOnBackground(source_->info.get());
- if (FLAG_background_compile && source_->info->literal() != nullptr) {
- // Parsing has succeeded, compile.
- source_->outer_function_job = Compiler::CompileTopLevelOnBackgroundThread(
- source_->info.get(), allocator_, &source_->inner_function_jobs);
- }
-
- if (script_data_ != nullptr) {
- source_->cached_data.reset(new ScriptCompiler::CachedData(
- script_data_->data(), script_data_->length(),
- ScriptCompiler::CachedData::BufferOwned));
- script_data_->ReleaseDataOwnership();
- delete script_data_;
- script_data_ = nullptr;
- }
-
- source_->info->EmitBackgroundParseStatisticsOnBackgroundThread();
-
- source_->info->set_on_background_thread(false);
- source_->info->set_stack_limit(old_stack_limit);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/parsing/background-parsing-task.h b/deps/v8/src/parsing/background-parsing-task.h
deleted file mode 100644
index eb3ed61e2e..0000000000
--- a/deps/v8/src/parsing/background-parsing-task.h
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_PARSING_BACKGROUND_PARSING_TASK_H_
-#define V8_PARSING_BACKGROUND_PARSING_TASK_H_
-
-#include <memory>
-
-#include "include/v8.h"
-#include "src/base/platform/platform.h"
-#include "src/base/platform/semaphore.h"
-#include "src/compiler.h"
-#include "src/parsing/parse-info.h"
-#include "src/unicode-cache.h"
-
-namespace v8 {
-namespace internal {
-
-class Parser;
-class ScriptData;
-class TimedHistogram;
-
-// Internal representation of v8::ScriptCompiler::StreamedSource. Contains all
-// data which needs to be transmitted between threads for background parsing,
-// finalizing it on the main thread, and compiling on the main thread.
-struct StreamedSource {
- StreamedSource(ScriptCompiler::ExternalSourceStream* source_stream,
- ScriptCompiler::StreamedSource::Encoding encoding)
- : source_stream(source_stream), encoding(encoding) {}
-
- void Release();
-
- // Internal implementation of v8::ScriptCompiler::StreamedSource.
- std::unique_ptr<ScriptCompiler::ExternalSourceStream> source_stream;
- ScriptCompiler::StreamedSource::Encoding encoding;
- std::unique_ptr<ScriptCompiler::CachedData> cached_data;
-
- // Data needed for parsing, and data needed to be passed between threads
- // between parsing and compilation. These need to be initialized before the
- // compilation starts.
- UnicodeCache unicode_cache;
- std::unique_ptr<ParseInfo> info;
- std::unique_ptr<Parser> parser;
-
- // Data needed for finalizing compilation after background compilation.
- std::unique_ptr<CompilationJob> outer_function_job;
- CompilationJobList inner_function_jobs;
-
- // Prevent copying.
- StreamedSource(const StreamedSource&) = delete;
- StreamedSource& operator=(const StreamedSource&) = delete;
-};
-
-class BackgroundParsingTask : public ScriptCompiler::ScriptStreamingTask {
- public:
- BackgroundParsingTask(StreamedSource* source,
- ScriptCompiler::CompileOptions options, int stack_size,
- Isolate* isolate);
-
- virtual void Run();
-
- private:
- StreamedSource* source_; // Not owned.
- int stack_size_;
- ScriptData* script_data_;
- AccountingAllocator* allocator_;
- TimedHistogram* timer_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_PARSING_BACKGROUND_PARSING_TASK_H_
diff --git a/deps/v8/src/parsing/expression-classifier.h b/deps/v8/src/parsing/expression-classifier.h
index 709d5736b5..522b650be7 100644
--- a/deps/v8/src/parsing/expression-classifier.h
+++ b/deps/v8/src/parsing/expression-classifier.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PARSING_EXPRESSION_CLASSIFIER_H
-#define V8_PARSING_EXPRESSION_CLASSIFIER_H
+#ifndef V8_PARSING_EXPRESSION_CLASSIFIER_H_
+#define V8_PARSING_EXPRESSION_CLASSIFIER_H_
#include "src/messages.h"
#include "src/parsing/scanner.h"
@@ -433,4 +433,4 @@ class ExpressionClassifier {
} // namespace internal
} // namespace v8
-#endif // V8_PARSING_EXPRESSION_CLASSIFIER_H
+#endif // V8_PARSING_EXPRESSION_CLASSIFIER_H_
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index b8f191dd5a..8657dab7f2 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -20,7 +20,6 @@ ParseInfo::ParseInfo(AccountingAllocator* zone_allocator)
: zone_(std::make_shared<Zone>(zone_allocator, ZONE_NAME)),
flags_(0),
extension_(nullptr),
- compile_options_(ScriptCompiler::kNoCompileOptions),
script_scope_(nullptr),
unicode_cache_(nullptr),
stack_limit_(0),
@@ -32,7 +31,6 @@ ParseInfo::ParseInfo(AccountingAllocator* zone_allocator)
function_literal_id_(FunctionLiteral::kIdTypeInvalid),
max_function_literal_id_(FunctionLiteral::kIdTypeInvalid),
character_stream_(nullptr),
- cached_data_(nullptr),
ast_value_factory_(nullptr),
ast_string_constants_(nullptr),
function_name_(nullptr),
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index e93c7137ca..5a0cf138c1 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -29,7 +29,6 @@ class DeclarationScope;
class FunctionLiteral;
class RuntimeCallStats;
class Logger;
-class ScriptData;
class SourceRangeMap;
class UnicodeCache;
class Utf16CharacterStream;
@@ -85,6 +84,7 @@ class V8_EXPORT_PRIVATE ParseInfo {
set_on_background_thread)
FLAG_ACCESSOR(kWrappedAsFunction, is_wrapped_as_function,
set_wrapped_as_function)
+ FLAG_ACCESSOR(kAllowEvalCache, allow_eval_cache, set_allow_eval_cache)
#undef FLAG_ACCESSOR
void set_parse_restriction(ParseRestriction restriction) {
@@ -106,20 +106,11 @@ class V8_EXPORT_PRIVATE ParseInfo {
v8::Extension* extension() const { return extension_; }
void set_extension(v8::Extension* extension) { extension_ = extension; }
- ScriptData** cached_data() const { return cached_data_; }
- void set_cached_data(ScriptData** cached_data) { cached_data_ = cached_data; }
ConsumedPreParsedScopeData* consumed_preparsed_scope_data() {
return &consumed_preparsed_scope_data_;
}
- ScriptCompiler::CompileOptions compile_options() const {
- return compile_options_;
- }
- void set_compile_options(ScriptCompiler::CompileOptions compile_options) {
- compile_options_ = compile_options;
- }
-
DeclarationScope* script_scope() const { return script_scope_; }
void set_script_scope(DeclarationScope* script_scope) {
script_scope_ = script_scope;
@@ -263,13 +254,13 @@ class V8_EXPORT_PRIVATE ParseInfo {
kIsAsmWasmBroken = 1 << 12,
kOnBackgroundThread = 1 << 13,
kWrappedAsFunction = 1 << 14, // Implicitly wrapped as function.
+ kAllowEvalCache = 1 << 15,
};
//------------- Inputs to parsing and scope analysis -----------------------
std::shared_ptr<Zone> zone_;
unsigned flags_;
v8::Extension* extension_;
- ScriptCompiler::CompileOptions compile_options_;
DeclarationScope* script_scope_;
UnicodeCache* unicode_cache_;
uintptr_t stack_limit_;
@@ -287,7 +278,6 @@ class V8_EXPORT_PRIVATE ParseInfo {
//----------- Inputs+Outputs of parsing and scope analysis -----------------
std::unique_ptr<Utf16CharacterStream> character_stream_;
- ScriptData** cached_data_; // used if available, populated if requested.
ConsumedPreParsedScopeData consumed_preparsed_scope_data_;
std::shared_ptr<AstValueFactory> ast_value_factory_;
const class AstStringConstants* ast_string_constants_;
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index faefe44011..2d608d5f40 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PARSING_PARSER_BASE_H
-#define V8_PARSING_PARSER_BASE_H
+#ifndef V8_PARSING_PARSER_BASE_H_
+#define V8_PARSING_PARSER_BASE_H_
#include <vector>
@@ -12,6 +12,7 @@
#include "src/ast/scopes.h"
#include "src/bailout-reason.h"
#include "src/base/hashmap.h"
+#include "src/base/v8-fallthrough.h"
#include "src/counters.h"
#include "src/globals.h"
#include "src/log.h"
@@ -277,13 +278,13 @@ class ParserBase {
script_id_(script_id),
allow_natives_(false),
allow_harmony_do_expressions_(false),
- allow_harmony_function_sent_(false),
allow_harmony_public_fields_(false),
allow_harmony_static_fields_(false),
allow_harmony_dynamic_import_(false),
allow_harmony_import_meta_(false),
allow_harmony_optional_catch_binding_(false),
- allow_harmony_private_fields_(false) {}
+ allow_harmony_private_fields_(false),
+ allow_eval_cache_(true) {}
#define ALLOW_ACCESSORS(name) \
bool allow_##name() const { return allow_##name##_; } \
@@ -291,12 +292,12 @@ class ParserBase {
ALLOW_ACCESSORS(natives);
ALLOW_ACCESSORS(harmony_do_expressions);
- ALLOW_ACCESSORS(harmony_function_sent);
ALLOW_ACCESSORS(harmony_public_fields);
ALLOW_ACCESSORS(harmony_static_fields);
ALLOW_ACCESSORS(harmony_dynamic_import);
ALLOW_ACCESSORS(harmony_import_meta);
ALLOW_ACCESSORS(harmony_optional_catch_binding);
+ ALLOW_ACCESSORS(eval_cache);
#undef ALLOW_ACCESSORS
@@ -398,6 +399,9 @@ class ParserBase {
}
BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
+ void AddSuspend() { suspend_count_++; }
+ int suspend_count() const { return suspend_count_; }
+
FunctionKind kind() const { return scope()->function_kind(); }
void RewindDestructuringAssignments(int pos) {
@@ -425,10 +429,6 @@ class ParserBase {
return &reported_errors_;
}
- ZoneList<RewritableExpressionT>* non_patterns_to_rewrite() {
- return &non_patterns_to_rewrite_;
- }
-
bool next_function_is_likely_called() const {
return next_function_is_likely_called_;
}
@@ -478,13 +478,15 @@ class ParserBase {
DeclarationScope* scope_;
ZoneList<RewritableExpressionT> destructuring_assignments_to_rewrite_;
- ZoneList<RewritableExpressionT> non_patterns_to_rewrite_;
ZoneList<typename ExpressionClassifier::Error> reported_errors_;
// A reason, if any, why this function should not be optimized.
BailoutReason dont_optimize_reason_;
+ // How many suspends are needed for this function.
+ int suspend_count_;
+
// Record whether the next (=== immediately following) function literal is
// preceded by a parenthesis / exclamation mark. Also record the previous
// state.
@@ -1084,6 +1086,8 @@ class ParserBase {
IdentifierT ParseIdentifierName(bool* ok);
+ ExpressionT ParseIdentifierNameOrPrivateName(bool* ok);
+
ExpressionT ParseRegExpLiteral(bool* ok);
ExpressionT ParsePrimaryExpression(bool* is_async, bool* ok);
@@ -1124,10 +1128,10 @@ class ParserBase {
bool* ok);
ExpressionT ParseObjectLiteral(bool* ok);
ClassLiteralPropertyT ParseClassPropertyDefinition(
- ClassLiteralChecker* checker, ClassInfo* class_info, bool has_extends,
- bool* is_computed_name, bool* has_seen_constructor,
- ClassLiteralProperty::Kind* property_kind, bool* is_static,
- bool* has_name_static_property, bool* ok);
+ ClassLiteralChecker* checker, ClassInfo* class_info,
+ IdentifierT* property_name, bool has_extends, bool* is_computed_name,
+ bool* has_seen_constructor, ClassLiteralProperty::Kind* property_kind,
+ bool* is_static, bool* has_name_static_property, bool* ok);
ExpressionT ParseClassFieldInitializer(ClassInfo* class_info, bool is_static,
bool* ok);
ObjectLiteralPropertyT ParseObjectPropertyDefinition(
@@ -1407,6 +1411,7 @@ class ParserBase {
// In async generators, if there is an explicit operand to the return
// statement, await the operand.
expr = factory()->NewAwait(expr, kNoSourcePosition);
+ function_state_->AddSuspend();
}
if (is_async_function()) {
return factory()->NewAsyncReturnStatement(expr, pos, end_pos);
@@ -1451,6 +1456,10 @@ class ParserBase {
return this->scanner()->CurrentMatchesContextualEscaped(
Token::CONSTRUCTOR);
}
+ bool IsPrivateConstructor() {
+ return this->scanner()->CurrentMatchesContextualEscaped(
+ Token::PRIVATE_CONSTRUCTOR);
+ }
bool IsPrototype() {
return this->scanner()->CurrentMatchesContextualEscaped(Token::PROTOTYPE);
}
@@ -1542,13 +1551,13 @@ class ParserBase {
bool allow_natives_;
bool allow_harmony_do_expressions_;
- bool allow_harmony_function_sent_;
bool allow_harmony_public_fields_;
bool allow_harmony_static_fields_;
bool allow_harmony_dynamic_import_;
bool allow_harmony_import_meta_;
bool allow_harmony_optional_catch_binding_;
bool allow_harmony_private_fields_;
+ bool allow_eval_cache_;
friend class DiscardableZoneScope;
};
@@ -1563,9 +1572,9 @@ ParserBase<Impl>::FunctionState::FunctionState(
outer_function_state_(*function_state_stack),
scope_(scope),
destructuring_assignments_to_rewrite_(16, scope->zone()),
- non_patterns_to_rewrite_(0, scope->zone()),
reported_errors_(16, scope->zone()),
dont_optimize_reason_(BailoutReason::kNoReason),
+ suspend_count_(0),
next_function_is_likely_called_(false),
previous_function_was_likely_called_(false),
contains_function_or_eval_(false) {
@@ -1778,6 +1787,27 @@ typename ParserBase<Impl>::IdentifierT ParserBase<Impl>::ParseIdentifierName(
}
template <typename Impl>
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseIdentifierNameOrPrivateName(bool* ok) {
+ int pos = position();
+ IdentifierT name;
+ ExpressionT key;
+ if (allow_harmony_private_fields() && peek() == Token::PRIVATE_NAME) {
+ Consume(Token::PRIVATE_NAME);
+ name = impl()->GetSymbol();
+ auto key_proxy =
+ impl()->ExpressionFromIdentifier(name, pos, InferName::kNo);
+ key_proxy->set_is_private_field();
+ key = key_proxy;
+ } else {
+ name = ParseIdentifierName(CHECK_OK);
+ key = factory()->NewStringLiteral(name, pos);
+ }
+ impl()->PushLiteralName(name);
+ return key;
+}
+
+template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseRegExpLiteral(
bool* ok) {
int pos = peek_position();
@@ -1847,7 +1877,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
}
// CoverCallExpressionAndAsyncArrowHead
*is_async = true;
- /* falls through */
+ V8_FALLTHROUGH;
case Token::IDENTIFIER:
case Token::LET:
case Token::STATIC:
@@ -2237,7 +2267,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePropertyName(
}
return expression;
}
- // Fall-through.
+ V8_FALLTHROUGH;
default:
*name = ParseIdentifierName(CHECK_OK);
@@ -2263,8 +2293,8 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePropertyName(
template <typename Impl>
typename ParserBase<Impl>::ClassLiteralPropertyT
ParserBase<Impl>::ParseClassPropertyDefinition(
- ClassLiteralChecker* checker, ClassInfo* class_info, bool has_extends,
- bool* is_computed_name, bool* has_seen_constructor,
+ ClassLiteralChecker* checker, ClassInfo* class_info, IdentifierT* name,
+ bool has_extends, bool* is_computed_name, bool* has_seen_constructor,
ClassLiteralProperty::Kind* property_kind, bool* is_static,
bool* has_name_static_property, bool* ok) {
DCHECK_NOT_NULL(has_seen_constructor);
@@ -2282,19 +2312,19 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
allow_harmony_private_fields());
int name_token_position = scanner()->peek_location().beg_pos;
- IdentifierT name = impl()->NullIdentifier();
+ *name = impl()->NullIdentifier();
ExpressionT name_expression;
if (name_token == Token::STATIC) {
Consume(Token::STATIC);
name_token_position = scanner()->peek_location().beg_pos;
if (peek() == Token::LPAREN) {
kind = PropertyKind::kMethodProperty;
- name = impl()->GetSymbol(); // TODO(bakkot) specialize on 'static'
- name_expression = factory()->NewStringLiteral(name, position());
+ *name = impl()->GetSymbol(); // TODO(bakkot) specialize on 'static'
+ name_expression = factory()->NewStringLiteral(*name, position());
} else if (peek() == Token::ASSIGN || peek() == Token::SEMICOLON ||
peek() == Token::RBRACE) {
- name = impl()->GetSymbol(); // TODO(bakkot) specialize on 'static'
- name_expression = factory()->NewStringLiteral(name, position());
+ *name = impl()->GetSymbol(); // TODO(bakkot) specialize on 'static'
+ name_expression = factory()->NewStringLiteral(*name, position());
} else if (peek() == Token::PRIVATE_NAME) {
DCHECK(allow_harmony_private_fields());
// TODO(gsathya): Make a better error message for this.
@@ -2303,21 +2333,21 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
return impl()->NullLiteralProperty();
} else {
*is_static = true;
- name_expression = ParsePropertyName(&name, &kind, &is_generator, &is_get,
+ name_expression = ParsePropertyName(name, &kind, &is_generator, &is_get,
&is_set, &is_async, is_computed_name,
CHECK_OK_CUSTOM(NullLiteralProperty));
}
} else if (name_token == Token::PRIVATE_NAME) {
Consume(Token::PRIVATE_NAME);
- name = impl()->GetSymbol();
- name_expression = factory()->NewStringLiteral(name, position());
+ *name = impl()->GetSymbol();
+ name_expression = factory()->NewStringLiteral(*name, position());
} else {
- name_expression = ParsePropertyName(&name, &kind, &is_generator, &is_get,
+ name_expression = ParsePropertyName(name, &kind, &is_generator, &is_get,
&is_set, &is_async, is_computed_name,
CHECK_OK_CUSTOM(NullLiteralProperty));
}
- if (!*has_name_static_property && *is_static && impl()->IsName(name)) {
+ if (!*has_name_static_property && *is_static && impl()->IsName(*name)) {
*has_name_static_property = true;
}
@@ -2333,13 +2363,15 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
case PropertyKind::kShorthandProperty:
case PropertyKind::kValueProperty:
if (allow_harmony_public_fields() || allow_harmony_private_fields()) {
- *property_kind = ClassLiteralProperty::FIELD;
+ *property_kind = name_token == Token::PRIVATE_NAME
+ ? ClassLiteralProperty::PRIVATE_FIELD
+ : ClassLiteralProperty::PUBLIC_FIELD;
if (*is_static && !allow_harmony_static_fields()) {
ReportUnexpectedToken(Next());
*ok = false;
return impl()->NullLiteralProperty();
}
- if (!*is_computed_name && name_token != Token::PRIVATE_NAME) {
+ if (!*is_computed_name) {
checker->CheckClassFieldName(*is_static,
CHECK_OK_CUSTOM(NullLiteralProperty));
}
@@ -2349,7 +2381,7 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
ClassLiteralPropertyT result = factory()->NewClassLiteralProperty(
name_expression, initializer, *property_kind, *is_static,
*is_computed_name);
- impl()->SetFunctionNameFromPropertyName(result, name);
+ impl()->SetFunctionNameFromPropertyName(result, *name);
return result;
} else {
@@ -2377,14 +2409,14 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
FunctionKind kind = MethodKindFor(is_generator, is_async);
- if (!*is_static && impl()->IsConstructor(name)) {
+ if (!*is_static && impl()->IsConstructor(*name)) {
*has_seen_constructor = true;
kind = has_extends ? FunctionKind::kDerivedConstructor
: FunctionKind::kBaseConstructor;
}
ExpressionT value = impl()->ParseFunctionLiteral(
- name, scanner()->location(), kSkipFunctionNameCheck, kind,
+ *name, scanner()->location(), kSkipFunctionNameCheck, kind,
FLAG_harmony_function_tostring ? name_token_position
: kNoSourcePosition,
FunctionLiteral::kAccessorOrMethod, language_mode(), nullptr,
@@ -2394,7 +2426,7 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
ClassLiteralPropertyT result = factory()->NewClassLiteralProperty(
name_expression, value, *property_kind, *is_static,
*is_computed_name);
- impl()->SetFunctionNameFromPropertyName(result, name);
+ impl()->SetFunctionNameFromPropertyName(result, *name);
return result;
}
@@ -2409,14 +2441,14 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
// Runtime_DefineAccessorPropertyUnchecked and since we can determine
// this statically we can skip the extra runtime check.
name_expression =
- factory()->NewStringLiteral(name, name_expression->position());
+ factory()->NewStringLiteral(*name, name_expression->position());
}
FunctionKind kind = is_get ? FunctionKind::kGetterFunction
: FunctionKind::kSetterFunction;
FunctionLiteralT value = impl()->ParseFunctionLiteral(
- name, scanner()->location(), kSkipFunctionNameCheck, kind,
+ *name, scanner()->location(), kSkipFunctionNameCheck, kind,
FLAG_harmony_function_tostring ? name_token_position
: kNoSourcePosition,
FunctionLiteral::kAccessorOrMethod, language_mode(), nullptr,
@@ -2430,7 +2462,7 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
const AstRawString* prefix =
is_get ? ast_value_factory()->get_space_string()
: ast_value_factory()->set_space_string();
- impl()->SetFunctionNameFromPropertyName(result, name, prefix);
+ impl()->SetFunctionNameFromPropertyName(result, *name, prefix);
return result;
}
case PropertyKind::kSpreadProperty:
@@ -3035,6 +3067,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseYieldExpression(
// a regular yield, given only one look-ahead token.
if (!delegating) break;
// Delegating yields require an RHS; fall through.
+ V8_FALLTHROUGH;
default:
expression = ParseAssignmentExpression(accept_IN, CHECK_OK);
ValidateExpression(CHECK_OK);
@@ -3045,6 +3078,12 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseYieldExpression(
if (delegating) {
ExpressionT yieldstar = factory()->NewYieldStar(expression, pos);
impl()->RecordSuspendSourceRange(yieldstar, PositionAfterSemicolon());
+ function_state_->AddSuspend();
+ if (IsAsyncGeneratorFunction(function_state_->kind())) {
+ // iterator_close and delegated_iterator_output suspend ids.
+ function_state_->AddSuspend();
+ function_state_->AddSuspend();
+ }
return yieldstar;
}
@@ -3053,6 +3092,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseYieldExpression(
ExpressionT yield =
factory()->NewYield(expression, pos, Suspend::kOnExceptionThrow);
impl()->RecordSuspendSourceRange(yield, PositionAfterSemicolon());
+ function_state_->AddSuspend();
return yield;
}
@@ -3191,13 +3231,19 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseUnaryExpression(
ExpressionT expression = ParseUnaryExpression(CHECK_OK);
ValidateExpression(CHECK_OK);
- if (op == Token::DELETE && is_strict(language_mode())) {
- if (impl()->IsIdentifier(expression)) {
+ if (op == Token::DELETE) {
+ if (impl()->IsIdentifier(expression) && is_strict(language_mode())) {
// "delete identifier" is a syntax error in strict mode.
ReportMessage(MessageTemplate::kStrictDelete);
*ok = false;
return impl()->NullExpression();
}
+
+ if (impl()->IsPropertyWithPrivateFieldKey(expression)) {
+ ReportMessage(MessageTemplate::kDeletePrivateField);
+ *ok = false;
+ return impl()->NullExpression();
+ }
}
if (peek() == Token::EXP) {
@@ -3239,6 +3285,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseUnaryExpression(
MessageTemplate::kInvalidDestructuringTarget);
ExpressionT expr = factory()->NewAwait(value, await_pos);
+ function_state_->AddSuspend();
impl()->RecordSuspendSourceRange(expr, PositionAfterSemicolon());
return expr;
} else {
@@ -3399,10 +3446,8 @@ ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
ArrowFormalParametersUnexpectedToken();
Consume(Token::PERIOD);
int pos = position();
- IdentifierT name = ParseIdentifierName(CHECK_OK);
- result = factory()->NewProperty(
- result, factory()->NewStringLiteral(name, pos), pos);
- impl()->PushLiteralName(name);
+ ExpressionT key = ParseIdentifierNameOrPrivateName(CHECK_OK);
+ result = factory()->NewProperty(result, key, pos);
break;
}
@@ -3513,22 +3558,6 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseMemberExpression(
Consume(Token::FUNCTION);
int function_token_position = position();
- if (allow_harmony_function_sent() && peek() == Token::PERIOD) {
- // function.sent
- int pos = position();
- ExpectMetaProperty(Token::SENT, "function.sent", pos, CHECK_OK);
-
- if (!is_generator()) {
- // TODO(neis): allow escaping into closures?
- impl()->ReportMessageAt(scanner()->location(),
- MessageTemplate::kUnexpectedFunctionSent);
- *ok = false;
- return impl()->NullExpression();
- }
-
- return impl()->FunctionSentExpression(pos);
- }
-
FunctionKind function_kind = Check(Token::MUL)
? FunctionKind::kGeneratorFunction
: FunctionKind::kNormalFunction;
@@ -3699,16 +3728,8 @@ ParserBase<Impl>::ParseMemberExpressionContinuation(ExpressionT expression,
Consume(Token::PERIOD);
int pos = peek_position();
- IdentifierT name;
- if (allow_harmony_private_fields() && peek() == Token::PRIVATE_NAME) {
- Consume(Token::PRIVATE_NAME);
- name = impl()->GetSymbol();
- } else {
- name = ParseIdentifierName(CHECK_OK);
- }
- expression = factory()->NewProperty(
- expression, factory()->NewStringLiteral(name, pos), pos);
- impl()->PushLiteralName(name);
+ ExpressionT key = ParseIdentifierNameOrPrivateName(CHECK_OK);
+ expression = factory()->NewProperty(expression, key, pos);
break;
}
case Token::TEMPLATE_SPAN:
@@ -4354,6 +4375,7 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
StatementListT body = impl()->NullStatementList();
int expected_property_count = -1;
+ int suspend_count = 0;
int function_literal_id = GetNextFunctionLiteralId();
FunctionKind kind = formal_parameters.scope->function_kind();
@@ -4440,6 +4462,7 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
impl()->CheckConflictingVarDeclarations(formal_parameters.scope, CHECK_OK);
impl()->RewriteDestructuringAssignments();
+ suspend_count = function_state.suspend_count();
}
FunctionLiteralT function_literal = factory()->NewFunctionLiteral(
@@ -4451,6 +4474,7 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
formal_parameters.scope->start_position(), has_braces,
function_literal_id, produced_preparsed_scope_data);
+ function_literal->set_suspend_count(suspend_count);
function_literal->set_function_token_position(
formal_parameters.scope->start_position());
@@ -4522,27 +4546,29 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
bool is_static;
ClassLiteralProperty::Kind property_kind;
ExpressionClassifier property_classifier(this);
+ IdentifierT property_name;
// If we haven't seen the constructor yet, it potentially is the next
// property.
bool is_constructor = !class_info.has_seen_constructor;
ClassLiteralPropertyT property = ParseClassPropertyDefinition(
- &checker, &class_info, has_extends, &is_computed_name,
+ &checker, &class_info, &property_name, has_extends, &is_computed_name,
&class_info.has_seen_constructor, &property_kind, &is_static,
&class_info.has_name_static_property, CHECK_OK);
if (!class_info.has_static_computed_names && is_static &&
is_computed_name) {
class_info.has_static_computed_names = true;
}
- if (is_computed_name && property_kind == ClassLiteralProperty::FIELD) {
+ if (is_computed_name &&
+ property_kind == ClassLiteralProperty::PUBLIC_FIELD) {
class_info.computed_field_count++;
}
is_constructor &= class_info.has_seen_constructor;
ValidateExpression(CHECK_OK);
AccumulateFormalParameterContainmentErrors();
- impl()->DeclareClassProperty(name, property, property_kind, is_static,
- is_constructor, is_computed_name, &class_info,
- CHECK_OK);
+ impl()->DeclareClassProperty(name, property, property_name, property_kind,
+ is_static, is_constructor, is_computed_name,
+ &class_info, CHECK_OK);
impl()->InferFunctionName();
}
@@ -4644,6 +4670,12 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseTemplateLiteral(
// TEMPLATE_SPAN, or a TEMPLATE_TAIL.
DCHECK(peek() == Token::TEMPLATE_SPAN || peek() == Token::TEMPLATE_TAIL);
+ if (tagged) {
+ // TaggedTemplate expressions prevent the eval compilation cache from being
+ // used. This flag is only used if an eval is being parsed.
+ set_allow_eval_cache(false);
+ }
+
bool forbid_illegal_escapes = !tagged;
// If we reach a TEMPLATE_TAIL first, we are parsing a NoSubstitutionTemplate.
@@ -4794,7 +4826,12 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseV8Intrinsic(
ExpressionClassifier classifier(this);
ExpressionListT args = ParseArguments(&spread_pos, CHECK_OK);
- DCHECK(!spread_pos.IsValid());
+ if (spread_pos.IsValid()) {
+ *ok = false;
+ ReportMessageAt(spread_pos, MessageTemplate::kIntrinsicWithSpread,
+ kSyntaxError);
+ return impl()->NullExpression();
+ }
return impl()->NewV8Intrinsic(name, args, pos, ok);
}
@@ -4943,7 +4980,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatementListItem(
Consume(Token::ASYNC);
return ParseAsyncFunctionDeclaration(nullptr, false, ok);
}
- /* falls through */
+ break;
default:
break;
}
@@ -5044,7 +5081,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatement(
*ok = false;
return impl()->NullStatement();
}
- // Falls through
+ V8_FALLTHROUGH;
default:
return ParseExpressionOrLabelledStatement(labels, allow_function, ok);
}
@@ -5653,6 +5690,14 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseTryStatement(
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
ZoneList<const AstRawString*>* labels, bool* ok) {
+ // Either a standard for loop
+ // for (<init>; <cond>; <next>) { ... }
+ // or a for-each loop
+ // for (<each> of|in <iterable>) { ... }
+ //
+ // We parse a declaration/expression after the 'for (' and then read the first
+ // expression/declaration before we know if this is a for or a for-each.
+
int stmt_pos = peek_position();
ForInfo for_info(this);
@@ -6187,7 +6232,7 @@ void ParserBase<Impl>::ClassLiteralChecker::CheckClassFieldName(bool is_static,
return;
}
- if (IsConstructor()) {
+ if (IsConstructor() || IsPrivateConstructor()) {
this->parser()->ReportMessage(MessageTemplate::kConstructorClassField);
*ok = false;
return;
@@ -6201,4 +6246,4 @@ void ParserBase<Impl>::ClassLiteralChecker::CheckClassFieldName(bool is_static,
} // namespace internal
} // namespace v8
-#endif // V8_PARSING_PARSER_BASE_H
+#endif // V8_PARSING_PARSER_BASE_H_
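
Several hunks above replace /* falls through */ comments with V8_FALLTHROUGH, which parser-base.h now pulls in via the new src/base/v8-fallthrough.h include. The macro expands to a fallthrough attribute so -Wimplicit-fallthrough is satisfied by the compiler rather than by comment parsing. The definition below is a plausible stand-in for illustration, not a copy of the real header.

// Sketch of the annotated-fallthrough pattern; TOY_FALLTHROUGH is invented.
#include <cstdio>

#if defined(__clang__)
#define TOY_FALLTHROUGH [[clang::fallthrough]]
#elif defined(__GNUC__) && __GNUC__ >= 7
#define TOY_FALLTHROUGH [[gnu::fallthrough]]
#else
#define TOY_FALLTHROUGH
#endif

enum class Token { kAsync, kIdentifier, kOther };

const char* Classify(Token token) {
  switch (token) {
    case Token::kAsync:
      // Deliberately continue into the identifier handling below, the same
      // shape as the CoverCallExpressionAndAsyncArrowHead case in the diff.
      TOY_FALLTHROUGH;
    case Token::kIdentifier:
      return "identifier-like";
    default:
      return "other";
  }
}

int main() {
  std::printf("%s\n", Classify(Token::kAsync));
  return 0;
}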
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index 0497958c82..8dc16a8b35 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -29,82 +29,8 @@
namespace v8 {
namespace internal {
-ScriptData::ScriptData(const byte* data, int length)
- : owns_data_(false), rejected_(false), data_(data), length_(length) {
- if (!IsAligned(reinterpret_cast<intptr_t>(data), kPointerAlignment)) {
- byte* copy = NewArray<byte>(length);
- DCHECK(IsAligned(reinterpret_cast<intptr_t>(copy), kPointerAlignment));
- CopyBytes(copy, data, length);
- data_ = copy;
- AcquireDataOwnership();
- }
-}
-
-FunctionEntry ParseData::GetFunctionEntry(int start) {
- // The current pre-data entry must be a FunctionEntry with the given
- // start position.
- if ((function_index_ + FunctionEntry::kSize <= Length()) &&
- (static_cast<int>(Data()[function_index_]) == start)) {
- int index = function_index_;
- function_index_ += FunctionEntry::kSize;
- Vector<unsigned> subvector(&(Data()[index]), FunctionEntry::kSize);
- return FunctionEntry(subvector);
- }
- return FunctionEntry();
-}
-
-
-int ParseData::FunctionCount() {
- int functions_size = FunctionsSize();
- if (functions_size < 0) return 0;
- if (functions_size % FunctionEntry::kSize != 0) return 0;
- return functions_size / FunctionEntry::kSize;
-}
-
-
-bool ParseData::IsSane() {
- if (!IsAligned(script_data_->length(), sizeof(unsigned))) return false;
- // Check that the header data is valid and doesn't specify
- // point to positions outside the store.
- int data_length = Length();
- if (data_length < PreparseDataConstants::kHeaderSize) return false;
- if (Magic() != PreparseDataConstants::kMagicNumber) return false;
- if (Version() != PreparseDataConstants::kCurrentVersion) return false;
- // Check that the space allocated for function entries is sane.
- int functions_size = FunctionsSize();
- if (functions_size < 0) return false;
- if (functions_size % FunctionEntry::kSize != 0) return false;
- // Check that the total size has room for header and function entries.
- int minimum_size =
- PreparseDataConstants::kHeaderSize + functions_size;
- if (data_length < minimum_size) return false;
- return true;
-}
-
-
-void ParseData::Initialize() {
- // Prepares state for use.
- int data_length = Length();
- if (data_length >= PreparseDataConstants::kHeaderSize) {
- function_index_ = PreparseDataConstants::kHeaderSize;
- }
-}
-
-
-unsigned ParseData::Magic() {
- return Data()[PreparseDataConstants::kMagicOffset];
-}
-
-
-unsigned ParseData::Version() {
- return Data()[PreparseDataConstants::kVersionOffset];
-}
-int ParseData::FunctionsSize() {
- return static_cast<int>(Data()[PreparseDataConstants::kFunctionsSizeOffset]);
-}
-
// Helper for putting parts of the parse results into a temporary zone when
// parsing inner function bodies.
class DiscardableZoneScope {
@@ -153,17 +79,6 @@ class DiscardableZoneScope {
DISALLOW_COPY_AND_ASSIGN(DiscardableZoneScope);
};
-void Parser::SetCachedData(ParseInfo* info) {
- DCHECK_NULL(cached_parse_data_);
- if (consume_cached_parse_data()) {
- if (allow_lazy_) {
- cached_parse_data_ = ParseData::FromCachedData(*info->cached_data());
- if (cached_parse_data_ != nullptr) return;
- }
- compile_options_ = ScriptCompiler::kNoCompileOptions;
- }
-}
-
FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
bool call_super, int pos,
int end_pos) {
@@ -403,16 +318,6 @@ Expression* Parser::NewTargetExpression(int pos) {
return proxy;
}
-Expression* Parser::FunctionSentExpression(int pos) {
- // We desugar function.sent into %_GeneratorGetInputOrDebugPos(generator).
- ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(1, zone());
- VariableProxy* generator = factory()->NewVariableProxy(
- function_state_->scope()->generator_object_var());
- args->Add(generator, zone());
- return factory()->NewCallRuntime(Runtime::kInlineGeneratorGetInputOrDebugPos,
- args, pos);
-}
-
Expression* Parser::ImportMetaExpression(int pos) {
return factory()->NewCallRuntime(
Runtime::kInlineGetImportMetaObject,
@@ -511,11 +416,8 @@ Parser::Parser(ParseInfo* info)
mode_(PARSE_EAGERLY), // Lazy mode must be set explicitly.
source_range_map_(info->source_range_map()),
target_stack_(nullptr),
- compile_options_(info->compile_options()),
- cached_parse_data_(nullptr),
total_preparse_skipped_(0),
temp_zoned_(false),
- log_(nullptr),
consumed_preparsed_scope_data_(info->consumed_preparsed_scope_data()),
parameters_end_pos_(info->parameters_end_pos()) {
// Even though we were passed ParseInfo, we should not store it in
@@ -541,7 +443,6 @@ Parser::Parser(ParseInfo* info)
info->extension() == nullptr && can_compile_lazily;
set_allow_natives(FLAG_allow_natives_syntax || info->is_native());
set_allow_harmony_do_expressions(FLAG_harmony_do_expressions);
- set_allow_harmony_function_sent(FLAG_harmony_function_sent);
set_allow_harmony_public_fields(FLAG_harmony_public_fields);
set_allow_harmony_static_fields(FLAG_harmony_static_fields);
set_allow_harmony_dynamic_import(FLAG_harmony_dynamic_import);
@@ -603,18 +504,6 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
fni_ = new (zone()) FuncNameInferrer(ast_value_factory(), zone());
// Initialize parser state.
- ParserLogger logger;
-
- if (produce_cached_parse_data()) {
- if (allow_lazy_) {
- log_ = &logger;
- } else {
- compile_options_ = ScriptCompiler::kNoCompileOptions;
- }
- } else if (consume_cached_parse_data()) {
- cached_parse_data_->Initialize();
- }
-
DeserializeScopeChain(info, info->maybe_outer_scope_info());
scanner_.Initialize(info->character_stream(), info->is_module());
@@ -623,11 +512,6 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
HandleSourceURLComments(isolate, info->script());
- if (produce_cached_parse_data() && result != nullptr) {
- *info->cached_data() = logger.GetScriptData();
- }
- log_ = nullptr;
-
if (V8_UNLIKELY(FLAG_log_function_events) && result != nullptr) {
double ms = timer.Elapsed().InMillisecondsF();
const char* event_name = "parse-eval";
@@ -742,6 +626,7 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
result = factory()->NewScriptOrEvalFunctionLiteral(
scope, body, function_state.expected_property_count(),
parameter_count);
+ result->set_suspend_count(function_state.suspend_count());
}
}
@@ -904,8 +789,9 @@ FunctionLiteral* Parser::DoParseFunction(ParseInfo* info,
scope->set_start_position(info->start_position());
ExpressionClassifier formals_classifier(this);
ParserFormalParameters formals(scope);
- int rewritable_length =
- function_state.destructuring_assignments_to_rewrite().length();
+ // The outer FunctionState should not contain destructuring assignments.
+ DCHECK_EQ(0,
+ function_state.destructuring_assignments_to_rewrite().length());
{
// Parsing patterns as variable reference expression creates
// NewUnresolved references in current scope. Enter arrow function
@@ -943,8 +829,12 @@ FunctionLiteral* Parser::DoParseFunction(ParseInfo* info,
// Pass `accept_IN=true` to ParseArrowFunctionLiteral --- This should
// not be observable, or else the preparser would have failed.
- Expression* expression =
- ParseArrowFunctionLiteral(true, formals, rewritable_length, &ok);
+ const bool accept_IN = true;
+ // Any destructuring assignments in the current FunctionState
+ // actually belong to the arrow function itself.
+ const int rewritable_length = 0;
+ Expression* expression = ParseArrowFunctionLiteral(
+ accept_IN, formals, rewritable_length, &ok);
if (ok) {
// Scanning must end at the same position that was recorded
// previously. If not, parsing has been interrupted due to a stack
@@ -957,10 +847,6 @@ FunctionLiteral* Parser::DoParseFunction(ParseInfo* info,
// must produce a FunctionLiteral.
DCHECK(expression->IsFunctionLiteral());
result = expression->AsFunctionLiteral();
- // Rewrite destructuring assignments in the parameters. (The ones
- // inside the function body are rewritten by
- // ParseArrowFunctionLiteral.)
- RewriteDestructuringAssignments();
} else {
ok = false;
}
@@ -1293,7 +1179,7 @@ Statement* Parser::ParseExportDefault(bool* ok) {
result = ParseAsyncFunctionDeclaration(&local_names, true, CHECK_OK);
break;
}
- /* falls through */
+ V8_FALLTHROUGH;
default: {
int pos = position();
@@ -1531,8 +1417,7 @@ Statement* Parser::DeclareFunction(const AstRawString* variable_name,
ZoneList<const AstRawString*>* names,
bool* ok) {
VariableProxy* proxy =
- factory()->NewVariableProxy(variable_name, NORMAL_VARIABLE);
-
+ factory()->NewVariableProxy(variable_name, NORMAL_VARIABLE, pos);
Declaration* declaration =
factory()->NewFunctionDeclaration(proxy, function, pos);
Declare(declaration, DeclarationDescriptor::NORMAL, mode, kCreatedInitialized,
@@ -1831,6 +1716,8 @@ void Parser::ParseAndRewriteAsyncGeneratorFunctionBody(
// Don't create iterator result for async generators, as the resume methods
// will create it.
+ // TODO(leszeks): This will create another suspend point, which is unnecessary
+ // if there is already an unconditional return in the body.
Statement* final_return = BuildReturnStatement(
factory()->NewUndefinedLiteral(kNoSourcePosition), kNoSourcePosition);
try_block->statements()->Add(final_return, zone());
@@ -1900,6 +1787,7 @@ Expression* Parser::BuildIteratorNextResult(VariableProxy* iterator,
Expression* next_call =
factory()->NewCall(next_property, next_arguments, kNoSourcePosition);
if (type == IteratorType::kAsync) {
+ function_state_->AddSuspend();
next_call = factory()->NewAwait(next_call, pos);
}
Expression* result_proxy = factory()->NewVariableProxy(result);
@@ -2681,6 +2569,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
ZoneList<Statement*>* body = nullptr;
int expected_property_count = -1;
+ int suspend_count = -1;
int num_parameters = -1;
int function_length = -1;
bool has_duplicate_parameters = false;
@@ -2747,10 +2636,10 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
if (should_preparse) {
scope->AnalyzePartially(&previous_zone_ast_node_factory);
} else {
- body = ParseFunction(function_name, pos, kind, function_type, scope,
- &num_parameters, &function_length,
- &has_duplicate_parameters, &expected_property_count,
- arguments_for_wrapped_function, CHECK_OK);
+ body = ParseFunction(
+ function_name, pos, kind, function_type, scope, &num_parameters,
+ &function_length, &has_duplicate_parameters, &expected_property_count,
+ &suspend_count, arguments_for_wrapped_function, CHECK_OK);
}
DCHECK_EQ(should_preparse, temp_zoned_);
@@ -2808,6 +2697,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
function_length, duplicate_parameters, function_type, eager_compile_hint,
pos, true, function_literal_id, produced_preparsed_scope_data);
function_literal->set_function_token_position(function_token_pos);
+ function_literal->set_suspend_count(suspend_count);
if (should_infer_name) {
DCHECK_NOT_NULL(fni_);
@@ -2826,38 +2716,11 @@ Parser::LazyParsingResult Parser::SkipFunction(
DCHECK_NE(kNoSourcePosition, function_scope->start_position());
DCHECK_EQ(kNoSourcePosition, parameters_end_pos_);
- if (produce_cached_parse_data()) CHECK(log_);
DCHECK_IMPLIES(IsArrowFunction(kind),
scanner()->current_token() == Token::ARROW);
- // Inner functions are not part of the cached data.
- if (!is_inner_function && consume_cached_parse_data() &&
- !cached_parse_data_->rejected()) {
- // If we have cached data, we use it to skip parsing the function. The data
- // contains the information we need to construct the lazy function.
- FunctionEntry entry =
- cached_parse_data_->GetFunctionEntry(function_scope->start_position());
- // Check that cached data is valid. If not, mark it as invalid (the embedder
- // handles it). Note that end position greater than end of stream is safe,
- // and hard to check.
- if (entry.is_valid() &&
- entry.end_pos() > function_scope->start_position()) {
- total_preparse_skipped_ += entry.end_pos() - position();
- function_scope->set_end_position(entry.end_pos());
- scanner()->SeekForward(entry.end_pos() - 1);
- Expect(Token::RBRACE, CHECK_OK_VALUE(kLazyParsingComplete));
- *num_parameters = entry.num_parameters();
- SetLanguageMode(function_scope, entry.language_mode());
- if (entry.uses_super_property())
- function_scope->RecordSuperPropertyUsage();
- SkipFunctionLiterals(entry.num_inner_functions());
- return kLazyParsingComplete;
- }
- cached_parse_data_->Reject();
- }
-
- // FIXME(marja): There are 3 ways to skip functions now. Unify them.
+ // FIXME(marja): There are 2 ways to skip functions now. Unify them.
DCHECK_NOT_NULL(consumed_preparsed_scope_data_);
if (consumed_preparsed_scope_data_->HasData()) {
DCHECK(FLAG_preparser_scope_analysis);
@@ -2908,6 +2771,9 @@ Parser::LazyParsingResult Parser::SkipFunction(
*ok = false;
return kLazyParsingComplete;
}
+
+ set_allow_eval_cache(reusable_preparser()->allow_eval_cache());
+
PreParserLogger* logger = reusable_preparser()->logger();
function_scope->set_end_position(logger->end());
Expect(Token::RBRACE, CHECK_OK_VALUE(kLazyParsingComplete));
@@ -2915,13 +2781,6 @@ Parser::LazyParsingResult Parser::SkipFunction(
function_scope->end_position() - function_scope->start_position();
*num_parameters = logger->num_parameters();
SkipFunctionLiterals(logger->num_inner_functions());
- if (!is_inner_function && produce_cached_parse_data()) {
- DCHECK(log_);
- log_->LogFunction(function_scope->start_position(),
- function_scope->end_position(), *num_parameters,
- language_mode(), function_scope->NeedsHomeObject(),
- logger->num_inner_functions());
- }
return kLazyParsingComplete;
}
@@ -3136,7 +2995,7 @@ Expression* Parser::BuildResolvePromise(Expression* value, int pos) {
args->Add(factory()->NewVariableProxy(PromiseVariable()), zone());
args->Add(value, zone());
Expression* call_runtime =
- factory()->NewCallRuntime(Context::PROMISE_RESOLVE_INDEX, args, pos);
+ factory()->NewCallRuntime(Runtime::kInlineResolvePromise, args, pos);
return factory()->NewBinaryOperation(
Token::COMMA, call_runtime,
factory()->NewVariableProxy(PromiseVariable()), pos);
@@ -3150,8 +3009,8 @@ Expression* Parser::BuildRejectPromise(Expression* value, int pos) {
args->Add(factory()->NewVariableProxy(PromiseVariable()), zone());
args->Add(value, zone());
args->Add(factory()->NewBooleanLiteral(false, pos), zone());
- Expression* call_runtime = factory()->NewCallRuntime(
- Context::PROMISE_INTERNAL_REJECT_INDEX, args, pos);
+ Expression* call_runtime =
+ factory()->NewCallRuntime(Runtime::kInlineRejectPromise, args, pos);
return factory()->NewBinaryOperation(
Token::COMMA, call_runtime,
factory()->NewVariableProxy(PromiseVariable()), pos);
@@ -3175,6 +3034,7 @@ Expression* Parser::BuildInitialYield(int pos, FunctionKind kind) {
// The position of the yield is important for reporting the exception
// caused by calling the .throw method on a generator suspended at the
// initial yield (i.e. right after generator instantiation).
+ function_state_->AddSuspend();
return factory()->NewYield(yield_result, scope()->start_position(),
Suspend::kOnExceptionThrow);
}
@@ -3184,6 +3044,7 @@ ZoneList<Statement*>* Parser::ParseFunction(
FunctionLiteral::FunctionType function_type,
DeclarationScope* function_scope, int* num_parameters, int* function_length,
bool* has_duplicate_parameters, int* expected_property_count,
+ int* suspend_count,
ZoneList<const AstRawString*>* arguments_for_wrapped_function, bool* ok) {
ParsingModeScope mode(this, allow_lazy_ ? PARSE_LAZILY : PARSE_EAGERLY);
@@ -3268,6 +3129,7 @@ ZoneList<Statement*>* Parser::ParseFunction(
!classifier()->is_valid_formal_parameter_list_without_duplicates();
*expected_property_count = function_state.expected_property_count();
+ *suspend_count = function_state.suspend_count();
return body;
}
@@ -3308,6 +3170,7 @@ Variable* Parser::CreateSyntheticContextVariable(const AstRawString* name,
// - properties
void Parser::DeclareClassProperty(const AstRawString* class_name,
ClassLiteralProperty* property,
+ const AstRawString* property_name,
ClassLiteralProperty::Kind kind,
bool is_static, bool is_constructor,
bool is_computed_name, ClassInfo* class_info,
@@ -3322,7 +3185,8 @@ void Parser::DeclareClassProperty(const AstRawString* class_name,
return;
}
- if (kind != ClassLiteralProperty::FIELD) {
+ if (kind != ClassLiteralProperty::PUBLIC_FIELD &&
+ kind != ClassLiteralProperty::PRIVATE_FIELD) {
class_info->properties->Add(property, zone());
return;
}
@@ -3331,12 +3195,14 @@ void Parser::DeclareClassProperty(const AstRawString* class_name,
if (is_static) {
DCHECK(allow_harmony_static_fields());
+ DCHECK_EQ(kind, ClassLiteralProperty::PUBLIC_FIELD);
class_info->static_fields->Add(property, zone());
} else {
class_info->instance_fields->Add(property, zone());
}
if (is_computed_name) {
+ DCHECK_EQ(kind, ClassLiteralProperty::PUBLIC_FIELD);
// We create a synthetic variable name here so that scope
// analysis doesn't dedupe the vars.
Variable* computed_name_var = CreateSyntheticContextVariable(
@@ -3346,6 +3212,13 @@ void Parser::DeclareClassProperty(const AstRawString* class_name,
property->set_computed_name_var(computed_name_var);
class_info->properties->Add(property, zone());
}
+
+ if (kind == ClassLiteralProperty::PRIVATE_FIELD) {
+ Variable* private_field_name_var =
+ CreateSyntheticContextVariable(property_name, CHECK_OK_VOID);
+ property->set_private_field_name_var(private_field_name_var);
+ class_info->properties->Add(property, zone());
+ }
}
FunctionLiteral* Parser::CreateInitializerFunction(
@@ -3432,6 +3305,15 @@ void Parser::CheckConflictingVarDeclarations(Scope* scope, bool* ok) {
}
}
+bool Parser::IsPropertyWithPrivateFieldKey(Expression* expression) {
+ if (!expression->IsProperty()) return false;
+ Property* property = expression->AsProperty();
+
+ if (!property->key()->IsVariableProxy()) return false;
+ VariableProxy* key = property->key()->AsVariableProxy();
+
+ return key->is_private_field();
+}
void Parser::InsertShadowingVarBindingInitializers(Block* inner_block) {
// For each var-binding that shadows a parameter, insert an assignment
@@ -3550,15 +3432,6 @@ void Parser::ParseOnBackground(ParseInfo* info) {
DCHECK_NULL(info->literal());
FunctionLiteral* result = nullptr;
- ParserLogger logger;
- if (produce_cached_parse_data()) {
- if (allow_lazy_) {
- log_ = &logger;
- } else {
- compile_options_ = ScriptCompiler::kNoCompileOptions;
- }
- }
-
scanner_.Initialize(info->character_stream(), info->is_module());
DCHECK(info->maybe_outer_scope_info().is_null());
@@ -3582,11 +3455,6 @@ void Parser::ParseOnBackground(ParseInfo* info) {
// We cannot internalize on a background thread; a foreground task will take
// care of calling AstValueFactory::Internalize just before compilation.
-
- if (produce_cached_parse_data()) {
- if (result != nullptr) *info->cached_data() = logger.GetScriptData();
- log_ = nullptr;
- }
}
Parser::TemplateLiteralState Parser::OpenTemplateLiteral(int pos) {
@@ -3654,9 +3522,8 @@ Expression* Parser::CloseTemplateLiteral(TemplateLiteralState* state, int start,
return expr;
} else {
// GetTemplateObject
- const int32_t hash = ComputeTemplateLiteralHash(lit);
Expression* template_object =
- factory()->NewGetTemplateObject(cooked_strings, raw_strings, hash, pos);
+ factory()->NewGetTemplateObject(cooked_strings, raw_strings, pos);
// Call TagFn
ZoneList<Expression*>* call_args =
@@ -3669,51 +3536,6 @@ Expression* Parser::CloseTemplateLiteral(TemplateLiteralState* state, int start,
namespace {
-// http://burtleburtle.net/bob/hash/integer.html
-uint32_t HalfAvalance(uint32_t a) {
- a = (a + 0x479AB41D) + (a << 8);
- a = (a ^ 0xE4AA10CE) ^ (a >> 5);
- a = (a + 0x9942F0A6) - (a << 14);
- a = (a ^ 0x5AEDD67D) ^ (a >> 3);
- a = (a + 0x17BEA992) + (a << 7);
- return a;
-}
-
-} // namespace
-
-int32_t Parser::ComputeTemplateLiteralHash(const TemplateLiteral* lit) {
- const ZoneList<const AstRawString*>* raw_strings = lit->raw();
- int total = raw_strings->length();
- DCHECK_GT(total, 0);
-
- uint32_t running_hash = 0;
-
- for (int index = 0; index < total; ++index) {
- if (index) {
- running_hash = StringHasher::ComputeRunningHashOneByte(
- running_hash, "${}", 3);
- }
-
- const AstRawString* raw_string = raw_strings->at(index);
- if (raw_string->is_one_byte()) {
- const char* data = reinterpret_cast<const char*>(raw_string->raw_data());
- running_hash = StringHasher::ComputeRunningHashOneByte(
- running_hash, data, raw_string->length());
- } else {
- const uc16* data = reinterpret_cast<const uc16*>(raw_string->raw_data());
- running_hash = StringHasher::ComputeRunningHash(running_hash, data,
- raw_string->length());
- }
- }
-
- // Pass {running_hash} throught a decent 'half avalance' hash function
- // and take the most significant bits (in Smi range).
- return static_cast<int32_t>(HalfAvalance(running_hash)) >>
- (sizeof(int32_t) * CHAR_BIT - kSmiValueSize);
-}
-
-namespace {
-
bool OnlyLastArgIsSpread(ZoneList<Expression*>* args) {
for (int i = 0; i < args->length() - 1; i++) {
if (args->at(i)->IsSpread()) {
@@ -3911,6 +3733,9 @@ void Parser::RewriteDestructuringAssignments() {
// pair.scope may already have been removed by FinalizeBlockScope in the
// meantime.
Scope* scope = to_rewrite->scope()->GetUnremovedScope();
+ // Scope at the time of the rewriting and the original parsing
+ // should be in the same function.
+ DCHECK(scope->GetClosureScope() == scope_->GetClosureScope());
BlockState block_state(&scope_, scope);
RewriteDestructuringAssignment(to_rewrite);
}
@@ -4070,6 +3895,7 @@ void Parser::BuildIteratorClose(ZoneList<Statement*>* statements,
Expression* call =
factory()->NewCallRuntime(Runtime::kInlineCall, args, nopos);
if (type == IteratorType::kAsync) {
+ function_state_->AddSuspend();
call = factory()->NewAwait(call, nopos);
}
Expression* output_proxy = factory()->NewVariableProxy(var_output);
@@ -4288,6 +4114,7 @@ void Parser::BuildIteratorCloseForCompletion(ZoneList<Statement*>* statements,
factory()->NewCallRuntime(Runtime::kInlineCall, args, nopos);
if (type == IteratorType::kAsync) {
+ function_state_->AddSuspend();
call = factory()->NewAwait(call, nopos);
}
@@ -4315,6 +4142,7 @@ void Parser::BuildIteratorCloseForCompletion(ZoneList<Statement*>* statements,
Expression* call =
factory()->NewCallRuntime(Runtime::kInlineCall, args, nopos);
if (type == IteratorType::kAsync) {
+ function_state_->AddSuspend();
call = factory()->NewAwait(call, nopos);
}
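
Editor's note: the parser.cc changes above repeatedly call function_state_->AddSuspend() wherever an await or initial yield is desugared, and the resulting count is copied onto the function literal via set_suspend_count(). A minimal sketch of that bookkeeping, with hypothetical class and member names (not the actual V8 declarations), could look like:

// Hedged sketch (hypothetical names): each function currently being parsed
// owns a counter that is bumped once per suspend point (yield/await) the
// parser desugars into its body.
class FunctionStateSketch {
 public:
  void AddSuspend() { ++suspend_count_; }
  int suspend_count() const { return suspend_count_; }

 private:
  int suspend_count_ = 0;
};

// When the literal is finalized the count is transferred to it, e.g.
// literal->set_suspend_count(state.suspend_count()), so later phases know
// how many resume points the generated code will need.
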
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index f92eddcd9d..dcc222da0f 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -14,7 +14,6 @@
#include "src/globals.h"
#include "src/parsing/parser-base.h"
#include "src/parsing/parsing.h"
-#include "src/parsing/preparse-data-format.h"
#include "src/parsing/preparse-data.h"
#include "src/parsing/preparser.h"
#include "src/utils.h"
@@ -27,7 +26,6 @@ namespace internal {
class ConsumedPreParsedScopeData;
class ParseInfo;
-class ScriptData;
class ParserTarget;
class ParserTargetScope;
class PendingCompilationErrorHandler;
@@ -77,47 +75,6 @@ class FunctionEntry BASE_EMBEDDED {
};
-// Wrapper around ScriptData to provide parser-specific functionality.
-class ParseData {
- public:
- static ParseData* FromCachedData(ScriptData* cached_data) {
- ParseData* pd = new ParseData(cached_data);
- if (pd->IsSane()) return pd;
- cached_data->Reject();
- delete pd;
- return nullptr;
- }
-
- void Initialize();
- FunctionEntry GetFunctionEntry(int start);
- int FunctionCount();
-
- unsigned* Data() { // Writable data as unsigned int array.
- return reinterpret_cast<unsigned*>(const_cast<byte*>(script_data_->data()));
- }
-
- void Reject() { script_data_->Reject(); }
-
- bool rejected() const { return script_data_->rejected(); }
-
- private:
- explicit ParseData(ScriptData* script_data) : script_data_(script_data) {}
-
- bool IsSane();
- unsigned Magic();
- unsigned Version();
- int FunctionsSize();
- int Length() const {
- // Script data length is already checked to be a multiple of unsigned size.
- return script_data_->length() / sizeof(unsigned);
- }
-
- ScriptData* script_data_;
- int function_index_;
-
- DISALLOW_COPY_AND_ASSIGN(ParseData);
-};
-
// ----------------------------------------------------------------------------
// JAVASCRIPT PARSING
@@ -192,8 +149,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
~Parser() {
delete reusable_preparser_;
reusable_preparser_ = nullptr;
- delete cached_parse_data_;
- cached_parse_data_ = nullptr;
}
static bool IsPreParser() { return false; }
@@ -276,20 +231,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
ZoneList<const AstRawString*>* PrepareWrappedArguments(ParseInfo* info,
Zone* zone);
- void SetCachedData(ParseInfo* info);
-
void StitchAst(ParseInfo* top_level_parse_info, Isolate* isolate);
- ScriptCompiler::CompileOptions compile_options() const {
- return compile_options_;
- }
- bool consume_cached_parse_data() const {
- return compile_options_ == ScriptCompiler::kConsumeParserCache;
- }
- bool produce_cached_parse_data() const {
- return compile_options_ == ScriptCompiler::kProduceParserCache;
- }
-
PreParser* reusable_preparser() {
if (reusable_preparser_ == nullptr) {
reusable_preparser_ =
@@ -299,7 +242,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
#define SET_ALLOW(name) reusable_preparser_->set_allow_##name(allow_##name());
SET_ALLOW(natives);
SET_ALLOW(harmony_do_expressions);
- SET_ALLOW(harmony_function_sent);
SET_ALLOW(harmony_public_fields);
SET_ALLOW(harmony_static_fields);
SET_ALLOW(harmony_dynamic_import);
@@ -307,6 +249,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
SET_ALLOW(harmony_bigint);
SET_ALLOW(harmony_optional_catch_binding);
SET_ALLOW(harmony_private_fields);
+ SET_ALLOW(eval_cache);
#undef SET_ALLOW
}
return reusable_preparser_;
@@ -377,6 +320,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
int class_token_pos, bool* ok);
V8_INLINE void DeclareClassProperty(const AstRawString* class_name,
ClassLiteralProperty* property,
+ const AstRawString* property_name,
ClassLiteralProperty::Kind kind,
bool is_static, bool is_constructor,
bool is_computed_name,
@@ -456,6 +400,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// hoisted over such a scope.
void CheckConflictingVarDeclarations(Scope* scope, bool* ok);
+ bool IsPropertyWithPrivateFieldKey(Expression* property);
+
// Insert initializer statements for var-bindings shadowing parameter bindings
// from a non-simple parameter list.
void InsertShadowingVarBindingInitializers(Block* block);
@@ -506,7 +452,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
FunctionLiteral::FunctionType function_type,
DeclarationScope* function_scope, int* num_parameters,
int* function_length, bool* has_duplicate_parameters,
- int* expected_property_count,
+ int* expected_property_count, int* suspend_count,
ZoneList<const AstRawString*>* arguments_for_wrapped_function, bool* ok);
void ThrowPendingError(Isolate* isolate, Handle<Script> script);
@@ -555,7 +501,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Expression* expression);
Expression* CloseTemplateLiteral(TemplateLiteralState* state, int start,
Expression* tag);
- int32_t ComputeTemplateLiteralHash(const TemplateLiteral* lit);
ZoneList<Expression*>* PrepareSpreadArguments(ZoneList<Expression*>* list);
Expression* SpreadCall(Expression* function, ZoneList<Expression*>* args,
@@ -883,12 +828,11 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Expression* NewSuperPropertyReference(int pos);
Expression* NewSuperCallReference(int pos);
Expression* NewTargetExpression(int pos);
- Expression* FunctionSentExpression(int pos);
Expression* ImportMetaExpression(int pos);
Literal* ExpressionFromLiteral(Token::Value token, int pos);
- V8_INLINE Expression* ExpressionFromIdentifier(
+ V8_INLINE VariableProxy* ExpressionFromIdentifier(
const AstRawString* name, int start_position,
InferName infer = InferName::kYes) {
if (infer == InferName::kYes) {
@@ -1144,7 +1088,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
ParserTarget* target_stack_; // for break, continue statements
ScriptCompiler::CompileOptions compile_options_;
- ParseData* cached_parse_data_;
// Other information which will be stored in Parser and moved to Isolate after
// parsing.
@@ -1152,7 +1095,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
int total_preparse_skipped_;
bool allow_lazy_;
bool temp_zoned_;
- ParserLogger* log_;
ConsumedPreParsedScopeData* consumed_preparsed_scope_data_;
// If not kNoSourcePosition, indicates that the first function literal
diff --git a/deps/v8/src/parsing/parsing.cc b/deps/v8/src/parsing/parsing.cc
index bc3c6dec7b..d34f826a23 100644
--- a/deps/v8/src/parsing/parsing.cc
+++ b/deps/v8/src/parsing/parsing.cc
@@ -36,7 +36,6 @@ bool ParseProgram(ParseInfo* info, Isolate* isolate) {
// Ok to use Isolate here; this function is only called in the main thread.
DCHECK(parser.parsing_on_main_thread_);
- parser.SetCachedData(info);
result = parser.ParseProgram(isolate, info);
info->set_literal(result);
if (result == nullptr) {
@@ -45,6 +44,9 @@ bool ParseProgram(ParseInfo* info, Isolate* isolate) {
} else {
result->scope()->AttachOuterScopeInfo(info, isolate);
info->set_language_mode(info->literal()->language_mode());
+ if (info->is_eval()) {
+ info->set_allow_eval_cache(parser.allow_eval_cache());
+ }
}
parser.UpdateStatistics(isolate, info->script());
return (result != nullptr);
@@ -79,6 +81,9 @@ bool ParseFunction(ParseInfo* info, Handle<SharedFunctionInfo> shared_info,
info->ast_value_factory());
} else {
result->scope()->AttachOuterScopeInfo(info, isolate);
+ if (info->is_eval()) {
+ info->set_allow_eval_cache(parser.allow_eval_cache());
+ }
}
parser.UpdateStatistics(isolate, info->script());
return (result != nullptr);
diff --git a/deps/v8/src/parsing/preparse-data-format.h b/deps/v8/src/parsing/preparse-data-format.h
deleted file mode 100644
index 2f317ce75f..0000000000
--- a/deps/v8/src/parsing/preparse-data-format.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_PARSING_PREPARSE_DATA_FORMAT_H_
-#define V8_PARSING_PREPARSE_DATA_FORMAT_H_
-
-namespace v8 {
-namespace internal {
-
-// Generic and general data used by preparse data recorders and readers.
-
-struct PreparseDataConstants {
- public:
- // Layout and constants of the preparse data exchange format.
- static const unsigned kMagicNumber = 0xBadDead;
- static const unsigned kCurrentVersion = 17;
-
- static const int kMagicOffset = 0;
- static const int kVersionOffset = 1;
- static const int kFunctionsSizeOffset = 2;
- static const int kSizeOffset = 3;
- static const int kHeaderSize = 4;
-
- static const unsigned char kNumberTerminator = 0x80u;
-};
-
-
-} // namespace internal
-} // namespace v8.
-
-#endif // V8_PARSING_PREPARSE_DATA_FORMAT_H_
diff --git a/deps/v8/src/parsing/preparse-data.cc b/deps/v8/src/parsing/preparse-data.cc
index f11eb7b21e..e39218111d 100644
--- a/deps/v8/src/parsing/preparse-data.cc
+++ b/deps/v8/src/parsing/preparse-data.cc
@@ -8,53 +8,10 @@
#include "src/globals.h"
#include "src/objects-inl.h"
#include "src/parsing/parser.h"
-#include "src/parsing/preparse-data-format.h"
namespace v8 {
namespace internal {
-void ParserLogger::LogFunction(int start, int end, int num_parameters,
- LanguageMode language_mode,
- bool uses_super_property,
- int num_inner_functions) {
- function_store_.Add(start);
- function_store_.Add(end);
- function_store_.Add(num_parameters);
- function_store_.Add(
- FunctionEntry::EncodeFlags(language_mode, uses_super_property));
- function_store_.Add(num_inner_functions);
-}
-
-ParserLogger::ParserLogger() {
- preamble_[PreparseDataConstants::kMagicOffset] =
- PreparseDataConstants::kMagicNumber;
- preamble_[PreparseDataConstants::kVersionOffset] =
- PreparseDataConstants::kCurrentVersion;
- preamble_[PreparseDataConstants::kFunctionsSizeOffset] = 0;
- preamble_[PreparseDataConstants::kSizeOffset] = 0;
- DCHECK_EQ(4, PreparseDataConstants::kHeaderSize);
-#ifdef DEBUG
- prev_start_ = -1;
-#endif
-}
-
-ScriptData* ParserLogger::GetScriptData() {
- int function_size = function_store_.size();
- int total_size = PreparseDataConstants::kHeaderSize + function_size;
- unsigned* data = NewArray<unsigned>(total_size);
- preamble_[PreparseDataConstants::kFunctionsSizeOffset] = function_size;
- MemCopy(data, preamble_, sizeof(preamble_));
- if (function_size > 0) {
- function_store_.WriteTo(Vector<unsigned>(
- data + PreparseDataConstants::kHeaderSize, function_size));
- }
- DCHECK(IsAligned(reinterpret_cast<intptr_t>(data), kPointerAlignment));
- ScriptData* result = new ScriptData(reinterpret_cast<byte*>(data),
- total_size * sizeof(unsigned));
- result->AcquireDataOwnership();
- return result;
-}
-
PreParseData::FunctionData PreParseData::GetFunctionData(int start) const {
auto it = functions_.find(start);
if (it != functions_.end()) {
diff --git a/deps/v8/src/parsing/preparse-data.h b/deps/v8/src/parsing/preparse-data.h
index b5db652c9c..0e40c76927 100644
--- a/deps/v8/src/parsing/preparse-data.h
+++ b/deps/v8/src/parsing/preparse-data.h
@@ -11,43 +11,9 @@
#include "src/base/hashmap.h"
#include "src/collector.h"
#include "src/messages.h"
-#include "src/parsing/preparse-data-format.h"
-
namespace v8 {
namespace internal {
-class ScriptData {
- public:
- ScriptData(const byte* data, int length);
- ~ScriptData() {
- if (owns_data_) DeleteArray(data_);
- }
-
- const byte* data() const { return data_; }
- int length() const { return length_; }
- bool rejected() const { return rejected_; }
-
- void Reject() { rejected_ = true; }
-
- void AcquireDataOwnership() {
- DCHECK(!owns_data_);
- owns_data_ = true;
- }
-
- void ReleaseDataOwnership() {
- DCHECK(owns_data_);
- owns_data_ = false;
- }
-
- private:
- bool owns_data_ : 1;
- bool rejected_ : 1;
- const byte* data_;
- int length_;
-
- DISALLOW_COPY_AND_ASSIGN(ScriptData);
-};
-
class PreParserLogger final {
public:
PreParserLogger()
@@ -74,25 +40,6 @@ class PreParserLogger final {
int num_inner_functions_;
};
-class ParserLogger final {
- public:
- ParserLogger();
-
- void LogFunction(int start, int end, int num_parameters,
- LanguageMode language_mode, bool uses_super_property,
- int num_inner_functions);
-
- ScriptData* GetScriptData();
-
- private:
- Collector<unsigned> function_store_;
- unsigned preamble_[PreparseDataConstants::kHeaderSize];
-
-#ifdef DEBUG
- int prev_start_;
-#endif
-};
-
class PreParseData final {
public:
struct FunctionData {
diff --git a/deps/v8/src/parsing/preparsed-scope-data.cc b/deps/v8/src/parsing/preparsed-scope-data.cc
index 786be3f0e5..70880f55c4 100644
--- a/deps/v8/src/parsing/preparsed-scope-data.cc
+++ b/deps/v8/src/parsing/preparsed-scope-data.cc
@@ -396,6 +396,7 @@ void ProducedPreParsedScopeData::SaveDataForVariable(Variable* var) {
// Store the variable name in debug mode; this way we can check that we
// restore data to the correct variable.
const AstRawString* name = var->raw_name();
+ byte_data_->WriteUint8(name->is_one_byte());
byte_data_->WriteUint32(name->length());
for (int i = 0; i < name->length(); ++i) {
byte_data_->WriteUint8(name->raw_data()[i]);
@@ -571,8 +572,7 @@ void ConsumedPreParsedScopeData::RestoreData(Scope* scope) {
if (scope_data_->RemainingBytes() < kUint8Size) {
// Temporary debugging code for detecting inconsistent data. Write debug
// information on the stack, then crash.
- data_->GetIsolate()->PushStackTraceAndDie(0xC0DEFEE, nullptr, nullptr,
- 0xC0DEFEE);
+ data_->GetIsolate()->PushStackTraceAndDie();
}
// scope_type is stored only in debug mode.
@@ -606,9 +606,20 @@ void ConsumedPreParsedScopeData::RestoreData(Scope* scope) {
void ConsumedPreParsedScopeData::RestoreDataForVariable(Variable* var) {
#ifdef DEBUG
const AstRawString* name = var->raw_name();
+ bool data_one_byte = scope_data_->ReadUint8();
+ DCHECK_IMPLIES(name->is_one_byte(), data_one_byte);
DCHECK_EQ(scope_data_->ReadUint32(), static_cast<uint32_t>(name->length()));
- for (int i = 0; i < name->length(); ++i) {
- DCHECK_EQ(scope_data_->ReadUint8(), name->raw_data()[i]);
+ if (!name->is_one_byte() && data_one_byte) {
+ // It's possible that "name" is a two-byte representation of the string
+ // stored in the data.
+ for (int i = 0; i < 2 * name->length(); i += 2) {
+ DCHECK_EQ(scope_data_->ReadUint8(), name->raw_data()[i]);
+ DCHECK_EQ(0, name->raw_data()[i + 1]);
+ }
+ } else {
+ for (int i = 0; i < name->length(); ++i) {
+ DCHECK_EQ(scope_data_->ReadUint8(), name->raw_data()[i]);
+ }
}
#endif
uint8_t variable_data = scope_data_->ReadQuarter();
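
Editor's note: the RestoreDataForVariable change above records whether the serialized debug name was one-byte and, when the live AstRawString is two-byte, compares each code unit against the one-byte data byte by byte. A rough standalone sketch of that comparison, with hypothetical types and names:

#include <cstdint>

// Hedged sketch: the stored debug name is one byte per character; the
// in-memory name is UTF-16. For the two to denote the same string, each code
// unit must have a matching low byte and a zero high byte.
static bool MatchesOneByteData(const uint8_t* stored, const uint16_t* name,
                               int length) {
  for (int i = 0; i < length; ++i) {
    if ((name[i] & 0xFF) != stored[i]) return false;  // low byte must match
    if ((name[i] >> 8) != 0) return false;            // high byte must be zero
  }
  return true;
}
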
diff --git a/deps/v8/src/parsing/preparser.cc b/deps/v8/src/parsing/preparser.cc
index b28eab2e75..5bb58a03aa 100644
--- a/deps/v8/src/parsing/preparser.cc
+++ b/deps/v8/src/parsing/preparser.cc
@@ -11,7 +11,6 @@
#include "src/globals.h"
#include "src/parsing/duplicate-finder.h"
#include "src/parsing/parser-base.h"
-#include "src/parsing/preparse-data-format.h"
#include "src/parsing/preparse-data.h"
#include "src/parsing/preparsed-scope-data.h"
#include "src/parsing/preparser.h"
@@ -50,6 +49,8 @@ PreParserIdentifier GetSymbolHelper(Scanner* scanner) {
return PreParserIdentifier::Await();
case Token::ASYNC:
return PreParserIdentifier::Async();
+ case Token::PRIVATE_NAME:
+ return PreParserIdentifier::PrivateName();
default:
break;
}
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index 705cd011ee..86fa7d1150 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PARSING_PREPARSER_H
-#define V8_PARSING_PREPARSER_H
+#ifndef V8_PARSING_PREPARSER_H_
+#define V8_PARSING_PREPARSER_H_
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
@@ -51,6 +51,9 @@ class PreParserIdentifier {
static PreParserIdentifier Name() {
return PreParserIdentifier(kNameIdentifier);
}
+ static PreParserIdentifier PrivateName() {
+ return PreParserIdentifier(kPrivateNameIdentifier);
+ }
bool IsNull() const { return type_ == kNullIdentifier; }
bool IsEval() const { return type_ == kEvalIdentifier; }
bool IsArguments() const { return type_ == kArgumentsIdentifier; }
@@ -58,6 +61,7 @@ class PreParserIdentifier {
bool IsConstructor() const { return type_ == kConstructorIdentifier; }
bool IsAwait() const { return type_ == kAwaitIdentifier; }
bool IsName() const { return type_ == kNameIdentifier; }
+ bool IsPrivateName() const { return type_ == kPrivateNameIdentifier; }
private:
enum Type {
@@ -68,7 +72,8 @@ class PreParserIdentifier {
kConstructorIdentifier,
kAwaitIdentifier,
kAsyncIdentifier,
- kNameIdentifier
+ kNameIdentifier,
+ kPrivateNameIdentifier
};
explicit PreParserIdentifier(Type type) : type_(type), string_(nullptr) {}
@@ -169,6 +174,12 @@ class PreParserExpression {
variables);
}
+ static PreParserExpression ThisPropertyWithPrivateFieldKey() {
+ return PreParserExpression(TypeField::encode(kExpression) |
+ ExpressionTypeField::encode(
+ kThisPropertyExpressionWithPrivateFieldKey));
+ }
+
static PreParserExpression ThisProperty() {
return PreParserExpression(
TypeField::encode(kExpression) |
@@ -181,6 +192,12 @@ class PreParserExpression {
ExpressionTypeField::encode(kPropertyExpression));
}
+ static PreParserExpression PropertyWithPrivateFieldKey() {
+ return PreParserExpression(
+ TypeField::encode(kExpression) |
+ ExpressionTypeField::encode(kPropertyExpressionWithPrivateFieldKey));
+ }
+
static PreParserExpression Call() {
return PreParserExpression(TypeField::encode(kExpression) |
ExpressionTypeField::encode(kCallExpression));
@@ -254,13 +271,27 @@ class PreParserExpression {
bool IsThisProperty() const {
return TypeField::decode(code_) == kExpression &&
- ExpressionTypeField::decode(code_) == kThisPropertyExpression;
+ (ExpressionTypeField::decode(code_) == kThisPropertyExpression ||
+ ExpressionTypeField::decode(code_) ==
+ kThisPropertyExpressionWithPrivateFieldKey);
}
bool IsProperty() const {
return TypeField::decode(code_) == kExpression &&
(ExpressionTypeField::decode(code_) == kPropertyExpression ||
- ExpressionTypeField::decode(code_) == kThisPropertyExpression);
+ ExpressionTypeField::decode(code_) == kThisPropertyExpression ||
+ ExpressionTypeField::decode(code_) ==
+ kPropertyExpressionWithPrivateFieldKey ||
+ ExpressionTypeField::decode(code_) ==
+ kThisPropertyExpressionWithPrivateFieldKey);
+ }
+
+ bool IsPropertyWithPrivateFieldKey() const {
+ return TypeField::decode(code_) == kExpression &&
+ (ExpressionTypeField::decode(code_) ==
+ kPropertyExpressionWithPrivateFieldKey ||
+ ExpressionTypeField::decode(code_) ==
+ kThisPropertyExpressionWithPrivateFieldKey);
}
bool IsCall() const {
@@ -298,12 +329,22 @@ class PreParserExpression {
// and PreParser.
PreParserExpression* operator->() { return this; }
+ void set_is_private_field() {
+ if (variables_ != nullptr) {
+ DCHECK(IsIdentifier());
+ DCHECK(AsIdentifier().IsPrivateName());
+ DCHECK_EQ(1, variables_->length());
+ variables_->first()->set_is_private_field();
+ }
+ }
+
// More dummy implementations of things PreParser doesn't need to track:
void SetShouldEagerCompile() {}
int position() const { return kNoSourcePosition; }
void set_function_token_position(int position) {}
void set_scope(Scope* scope) {}
+ void set_suspend_count(int suspend_count) {}
private:
enum Type {
@@ -319,7 +360,9 @@ class PreParserExpression {
enum ExpressionType {
kThisExpression,
kThisPropertyExpression,
+ kThisPropertyExpressionWithPrivateFieldKey,
kPropertyExpression,
+ kPropertyExpressionWithPrivateFieldKey,
kCallExpression,
kCallEvalExpression,
kCallTaggedTemplateExpression,
@@ -580,8 +623,16 @@ class PreParserFactory {
PreParserExpression NewVariableProxy(void* variable) {
return PreParserExpression::Default();
}
+
PreParserExpression NewProperty(const PreParserExpression& obj,
const PreParserExpression& key, int pos) {
+ if (key.IsIdentifier() && key.AsIdentifier().IsPrivateName()) {
+ if (obj.IsThis()) {
+ return PreParserExpression::ThisPropertyWithPrivateFieldKey();
+ }
+ return PreParserExpression::PropertyWithPrivateFieldKey();
+ }
+
if (obj.IsThis()) {
return PreParserExpression::ThisProperty();
}
@@ -984,6 +1035,10 @@ class PreParser : public ParserBase<PreParser> {
TemplateLiteralState* state, int start, const PreParserExpression& tag) {
return PreParserExpression::Default();
}
+ V8_INLINE bool IsPropertyWithPrivateFieldKey(
+ const PreParserExpression& expression) {
+ return expression.IsPropertyWithPrivateFieldKey();
+ }
V8_INLINE void CheckConflictingVarDeclarations(Scope* scope, bool* ok) {}
V8_INLINE void SetLanguageMode(Scope* scope, LanguageMode mode) {
@@ -1142,16 +1197,23 @@ class PreParser : public ParserBase<PreParser> {
}
V8_INLINE void DeclareClassProperty(const PreParserIdentifier& class_name,
const PreParserExpression& property,
+ const PreParserIdentifier& property_name,
ClassLiteralProperty::Kind kind,
bool is_static, bool is_constructor,
bool is_computed_name,
ClassInfo* class_info, bool* ok) {
- if (kind == ClassLiteralProperty::FIELD && is_computed_name) {
+ if (kind == ClassLiteralProperty::PUBLIC_FIELD && is_computed_name) {
scope()->DeclareVariableName(
ClassFieldVariableName(ast_value_factory(),
class_info->computed_field_count),
CONST);
}
+
+ if (kind == ClassLiteralProperty::PRIVATE_FIELD &&
+ property_name.string_ != nullptr) {
+ DCHECK(track_unresolved_variables_);
+ scope()->DeclareVariableName(property_name.string_, CONST);
+ }
}
V8_INLINE PreParserExpression
@@ -1528,10 +1590,6 @@ class PreParser : public ParserBase<PreParser> {
return PreParserExpression::NewTargetExpression();
}
- V8_INLINE PreParserExpression FunctionSentExpression(int pos) {
- return PreParserExpression::Default();
- }
-
V8_INLINE PreParserExpression ImportMetaExpression(int pos) {
return PreParserExpression::Default();
}
@@ -1707,4 +1765,4 @@ PreParserExpression PreParser::SpreadCallNew(
} // namespace internal
} // namespace v8
-#endif // V8_PARSING_PREPARSER_H
+#endif // V8_PARSING_PREPARSER_H_
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index 3152ab184e..a6dd075fec 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -1071,8 +1071,11 @@ Token::Value Scanner::ScanString() {
AddLiteralChar(c);
}
- while (c0_ != quote && c0_ != kEndOfInput &&
- !unibrow::IsLineTerminator(c0_)) {
+ bool (*line_terminator_func)(unsigned int) =
+ FLAG_harmony_subsume_json ? unibrow::IsStringLiteralLineTerminator
+ : unibrow::IsLineTerminator;
+
+ while (c0_ != quote && c0_ != kEndOfInput && !line_terminator_func(c0_)) {
uc32 c = c0_;
Advance();
if (c == '\\') {
@@ -1225,6 +1228,56 @@ void Scanner::ScanDecimalDigits() {
AddLiteralCharAdvance();
}
+bool Scanner::ScanBinaryDigits() {
+ // we must have at least one binary digit after 'b'/'B'
+ if (!IsBinaryDigit(c0_)) return false;
+ while (IsBinaryDigit(c0_)) {
+ AddLiteralCharAdvance();
+ }
+ return true;
+}
+
+bool Scanner::ScanOctalDigits() {
+ // we must have at least one octal digit after 'o'/'O'
+ if (!IsOctalDigit(c0_)) return false;
+ while (IsOctalDigit(c0_)) {
+ AddLiteralCharAdvance();
+ }
+
+ return true;
+}
+
+bool Scanner::ScanImplicitOctalDigits(int start_pos) {
+ // (possible) octal number
+ while (true) {
+ if (c0_ == '8' || c0_ == '9') return false;
+ if (c0_ < '0' || '7' < c0_) {
+ // Octal literal finished.
+ octal_pos_ = Location(start_pos, source_pos());
+ octal_message_ = MessageTemplate::kStrictOctalLiteral;
+ break;
+ }
+ AddLiteralCharAdvance();
+ }
+ return true;
+}
+
+bool Scanner::ScanHexDigits() {
+ // we must have at least one hex digit after 'x'/'X'
+ if (!IsHexDigit(c0_)) return false;
+ while (IsHexDigit(c0_)) {
+ AddLiteralCharAdvance();
+ }
+ return true;
+}
+
+bool Scanner::ScanSignedInteger() {
+ if (c0_ == '+' || c0_ == '-') AddLiteralCharAdvance();
+ // we must have at least one decimal digit after 'e'/'E'
+ if (!IsDecimalDigit(c0_)) return false;
+ ScanDecimalDigits();
+ return true;
+}
Token::Value Scanner::ScanNumber(bool seen_period) {
DCHECK(IsDecimalDigit(c0_)); // the first digit of the number or the fraction
@@ -1254,52 +1307,22 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
// either 0, 0exxx, 0Exxx, 0.xxx, a hex number, a binary number or
// an octal number.
if (c0_ == 'x' || c0_ == 'X') {
- // hex number
- kind = HEX;
AddLiteralCharAdvance();
- if (!IsHexDigit(c0_)) {
- // we must have at least one hex digit after 'x'/'X'
- return Token::ILLEGAL;
- }
- while (IsHexDigit(c0_)) {
- AddLiteralCharAdvance();
- }
+ kind = HEX;
+ if (!ScanHexDigits()) return Token::ILLEGAL;
} else if (c0_ == 'o' || c0_ == 'O') {
- kind = OCTAL;
AddLiteralCharAdvance();
- if (!IsOctalDigit(c0_)) {
- // we must have at least one octal digit after 'o'/'O'
- return Token::ILLEGAL;
- }
- while (IsOctalDigit(c0_)) {
- AddLiteralCharAdvance();
- }
+ kind = OCTAL;
+ if (!ScanOctalDigits()) return Token::ILLEGAL;
} else if (c0_ == 'b' || c0_ == 'B') {
- kind = BINARY;
AddLiteralCharAdvance();
- if (!IsBinaryDigit(c0_)) {
- // we must have at least one binary digit after 'b'/'B'
- return Token::ILLEGAL;
- }
- while (IsBinaryDigit(c0_)) {
- AddLiteralCharAdvance();
- }
+ kind = BINARY;
+ if (!ScanBinaryDigits()) return Token::ILLEGAL;
} else if ('0' <= c0_ && c0_ <= '7') {
- // (possible) octal number
kind = IMPLICIT_OCTAL;
- while (true) {
- if (c0_ == '8' || c0_ == '9') {
- at_start = false;
- kind = DECIMAL_WITH_LEADING_ZERO;
- break;
- }
- if (c0_ < '0' || '7' < c0_) {
- // Octal literal finished.
- octal_pos_ = Location(start_pos, source_pos());
- octal_message_ = MessageTemplate::kStrictOctalLiteral;
- break;
- }
- AddLiteralCharAdvance();
+ if (!ScanImplicitOctalDigits(start_pos)) {
+ kind = DECIMAL_WITH_LEADING_ZERO;
+ at_start = false;
}
} else if (c0_ == '8' || c0_ == '9') {
kind = DECIMAL_WITH_LEADING_ZERO;
@@ -1308,6 +1331,7 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
// Parse decimal digits and allow trailing fractional part.
if (kind == DECIMAL || kind == DECIMAL_WITH_LEADING_ZERO) {
+ // This is an optimization for parsing Decimal numbers as Smi's.
+ // This is an optimization for parsing decimal numbers as Smis.
if (at_start) {
uint64_t value = 0;
while (IsDecimalDigit(c0_)) {
@@ -1362,17 +1386,14 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
} else if (c0_ == 'e' || c0_ == 'E') {
// scan exponent, if any
DCHECK(kind != HEX); // 'e'/'E' must be scanned as part of the hex number
+
if (!(kind == DECIMAL || kind == DECIMAL_WITH_LEADING_ZERO))
return Token::ILLEGAL;
+
// scan exponent
AddLiteralCharAdvance();
- if (c0_ == '+' || c0_ == '-')
- AddLiteralCharAdvance();
- if (!IsDecimalDigit(c0_)) {
- // we must have at least one decimal digit after 'e'/'E'
- return Token::ILLEGAL;
- }
- ScanDecimalDigits();
+
+ if (!ScanSignedInteger()) return Token::ILLEGAL;
}
// The source character immediately following a numeric literal must
@@ -1487,7 +1508,6 @@ uc32 Scanner::ScanUnicodeEscape() {
KEYWORD_GROUP('r') \
KEYWORD("return", Token::RETURN) \
KEYWORD_GROUP('s') \
- KEYWORD("sent", Token::SENT) \
KEYWORD("set", Token::SET) \
KEYWORD("static", Token::STATIC) \
KEYWORD("super", Token::SUPER) \
@@ -1510,13 +1530,15 @@ uc32 Scanner::ScanUnicodeEscape() {
KEYWORD_GROUP('y') \
KEYWORD("yield", Token::YIELD) \
KEYWORD_GROUP('_') \
- KEYWORD("__proto__", Token::PROTO_UNDERSCORED)
+ KEYWORD("__proto__", Token::PROTO_UNDERSCORED) \
+ KEYWORD_GROUP('#') \
+ KEYWORD("#constructor", Token::PRIVATE_CONSTRUCTOR)
static Token::Value KeywordOrIdentifierToken(const uint8_t* input,
int input_length) {
DCHECK_GE(input_length, 1);
const int kMinLength = 2;
- const int kMaxLength = 11;
+ const int kMaxLength = 12;
if (input_length < kMinLength || input_length > kMaxLength) {
return Token::IDENTIFIER;
}
@@ -1551,6 +1573,9 @@ static Token::Value KeywordOrIdentifierToken(const uint8_t* input,
KEYWORDS(KEYWORD_GROUP_CASE, KEYWORD)
}
return Token::IDENTIFIER;
+#undef KEYWORDS
+#undef KEYWORD
+#undef KEYWORD_GROUP_CASE
}
Token::Value Scanner::ScanIdentifierOrKeyword() {
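
Editor's note: earlier in this scanner.cc diff, ScanString() now selects its line-terminator predicate through a function pointer: under --harmony_subsume_json (the JSON-superset proposal), U+2028 and U+2029 no longer terminate a string literal. A hedged sketch of the selection, with hypothetical predicate implementations standing in for the unibrow helpers:

// Hedged sketch (hypothetical names): with the JSON-superset flag, U+2028
// (LINE SEPARATOR) and U+2029 (PARAGRAPH SEPARATOR) are ordinary characters
// inside string literals; without it they end the literal like '\n' does.
static bool IsClassicLineTerminator(unsigned c) {
  return c == '\n' || c == '\r' || c == 0x2028 || c == 0x2029;
}
static bool IsStringLiteralLineTerminator(unsigned c) {
  return c == '\n' || c == '\r';
}
static bool TerminatesStringLiteral(unsigned c, bool allow_json_superset) {
  bool (*pred)(unsigned) = allow_json_superset ? IsStringLiteralLineTerminator
                                               : IsClassicLineTerminator;
  return pred(c);
}
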
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index f5106990ff..fe7d754319 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -721,6 +721,12 @@ class Scanner {
Token::Value ScanHtmlComment();
void ScanDecimalDigits();
+ bool ScanHexDigits();
+ bool ScanBinaryDigits();
+ bool ScanSignedInteger();
+ bool ScanOctalDigits();
+ bool ScanImplicitOctalDigits(int start_pos);
+
Token::Value ScanNumber(bool seen_period);
Token::Value ScanIdentifierOrKeyword();
Token::Value ScanIdentifierOrKeywordInner(LiteralScope* literal);
diff --git a/deps/v8/src/parsing/token.h b/deps/v8/src/parsing/token.h
index 07974edf41..660f24361c 100644
--- a/deps/v8/src/parsing/token.h
+++ b/deps/v8/src/parsing/token.h
@@ -188,13 +188,13 @@ namespace internal {
C(SET, "set", 0) \
C(OF, "of", 0) \
C(TARGET, "target", 0) \
- C(SENT, "sent", 0) \
C(META, "meta", 0) \
C(AS, "as", 0) \
C(FROM, "from", 0) \
C(NAME, "name", 0) \
C(PROTO_UNDERSCORED, "__proto__", 0) \
C(CONSTRUCTOR, "constructor", 0) \
+ C(PRIVATE_CONSTRUCTOR, "#constructor", 0) \
C(PROTOTYPE, "prototype", 0) \
C(EVAL, "eval", 0) \
C(ARGUMENTS, "arguments", 0) \
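
Editor's note: the new #constructor token is also why kMaxLength in scanner.cc's keyword matcher above grows from 11 to 12: the longest keyword in the table was previously "constructor" (11 characters), and "#constructor" has 12. A one-line check of that arithmetic:

// "#constructor" is 12 characters (sizeof counts the trailing NUL).
static_assert(sizeof("#constructor") - 1 == 12, "longest scanner keyword");
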
diff --git a/deps/v8/src/perf-jit.cc b/deps/v8/src/perf-jit.cc
index 7ccd02ef9b..ac1362c9a9 100644
--- a/deps/v8/src/perf-jit.cc
+++ b/deps/v8/src/perf-jit.cc
@@ -31,8 +31,10 @@
#include "src/assembler.h"
#include "src/eh-frame.h"
+#include "src/instruction-stream.h"
#include "src/objects-inl.h"
#include "src/source-position-table.h"
+#include "src/wasm/wasm-code-manager.h"
#if V8_OS_LINUX
#include <fcntl.h>
@@ -213,7 +215,11 @@ void PerfJitLogger::LogRecordedBuffer(AbstractCode* abstract_code,
// Debug info has to be emitted first.
if (FLAG_perf_prof && shared != nullptr) {
- LogWriteDebugInfo(code, shared);
+ // TODO(herhut): This currently breaks for js2wasm/wasm2js functions.
+ if (code->kind() != Code::JS_TO_WASM_FUNCTION &&
+ code->kind() != Code::WASM_TO_JS_FUNCTION) {
+ LogWriteDebugInfo(code, shared);
+ }
}
const char* code_name = name;
@@ -226,6 +232,58 @@ void PerfJitLogger::LogRecordedBuffer(AbstractCode* abstract_code,
// Unwinding info comes right after debug info.
if (FLAG_perf_prof_unwinding_info) LogWriteUnwindingInfo(code);
+ WriteJitCodeLoadEntry(code_pointer, code_size, code_name, length);
+}
+
+void PerfJitLogger::LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+ int length) {
+ base::LockGuard<base::RecursiveMutex> guard_file(file_mutex_.Pointer());
+
+ if (perf_output_handle_ == nullptr) return;
+
+ WriteJitCodeLoadEntry(code->instructions().start(),
+ code->instructions().length(), name, length);
+}
+
+void PerfJitLogger::WriteJitCodeLoadEntry(const uint8_t* code_pointer,
+ uint32_t code_size, const char* name,
+ int name_length) {
+ static const char string_terminator[] = "\0";
+
+ PerfJitCodeLoad code_load;
+ code_load.event_ = PerfJitCodeLoad::kLoad;
+ code_load.size_ = sizeof(code_load) + name_length + 1 + code_size;
+ code_load.time_stamp_ = GetTimestamp();
+ code_load.process_id_ =
+ static_cast<uint32_t>(base::OS::GetCurrentProcessId());
+ code_load.thread_id_ = static_cast<uint32_t>(base::OS::GetCurrentThreadId());
+ code_load.vma_ = reinterpret_cast<uint64_t>(code_pointer);
+ code_load.code_address_ = reinterpret_cast<uint64_t>(code_pointer);
+ code_load.code_size_ = code_size;
+ code_load.code_id_ = code_index_;
+
+ code_index_++;
+
+ LogWriteBytes(reinterpret_cast<const char*>(&code_load), sizeof(code_load));
+ LogWriteBytes(name, name_length);
+ LogWriteBytes(string_terminator, 1);
+ LogWriteBytes(reinterpret_cast<const char*>(code_pointer), code_size);
+}
+
+void PerfJitLogger::LogRecordedBuffer(const InstructionStream* stream,
+ const char* name, int length) {
+ if (FLAG_perf_basic_prof_only_functions) return;
+
+ base::LockGuard<base::RecursiveMutex> guard_file(file_mutex_.Pointer());
+
+ if (perf_output_handle_ == nullptr) return;
+
+ const char* code_name = name;
+ uint8_t* code_pointer = stream->bytes();
+ uint32_t code_size = static_cast<uint32_t>(stream->byte_length());
+
+ // TODO(jgruber): Do we need unwinding info?
+
static const char string_terminator[] = "\0";
PerfJitCodeLoad code_load;
@@ -288,6 +346,8 @@ void PerfJitLogger::LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared) {
entry_count++;
}
if (entry_count == 0) return;
+ // The WasmToJS wrapper stubs have source position entries.
+ if (!shared->HasSourceCode()) return;
Handle<Script> script(Script::cast(shared->script()));
PerfJitCodeDebugInfo debug_info;
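
Editor's note: the new WriteJitCodeLoadEntry above centralizes emission of a perf jitdump CODE_LOAD record: a fixed header (the fields assigned to code_load), the NUL-terminated name, then the raw code bytes. A rough sketch of that payload, with a hypothetical struct standing in for PerfJitCodeLoad (the authoritative widths and order live in perf-jit.h, not here):

#include <cstdint>

// Hedged sketch; field names follow the diff above.
struct CodeLoadRecordSketch {
  uint32_t event_;         // kLoad
  uint32_t size_;          // sizeof(record) + name length + 1 + code size
  uint64_t time_stamp_;
  uint32_t process_id_;
  uint32_t thread_id_;
  uint64_t vma_;           // where the code is mapped
  uint64_t code_address_;  // same as vma_ for JIT code
  uint64_t code_size_;
  uint64_t code_id_;       // monotonically increasing index
};
// The record is followed by the NUL-terminated function name and then the
// code bytes themselves, matching the three trailing LogWriteBytes calls.
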
diff --git a/deps/v8/src/perf-jit.h b/deps/v8/src/perf-jit.h
index 2b0b4831e0..8e7c6b5939 100644
--- a/deps/v8/src/perf-jit.h
+++ b/deps/v8/src/perf-jit.h
@@ -54,6 +54,10 @@ class PerfJitLogger : public CodeEventLogger {
uint64_t GetTimestamp();
void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
const char* name, int length) override;
+ void LogRecordedBuffer(const InstructionStream* stream, const char* name,
+ int length) override;
+ void LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+ int length) override;
// Extension added to V8 log file name to get the low-level log name.
static const char kFilenameFormatString[];
@@ -63,6 +67,9 @@ class PerfJitLogger : public CodeEventLogger {
// minimize the associated overhead.
static const int kLogBufferSize = 2 * MB;
+ void WriteJitCodeLoadEntry(const uint8_t* code_pointer, uint32_t code_size,
+ const char* name, int name_length);
+
void LogWriteBytes(const char* bytes, int size);
void LogWriteHeader();
void LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared);
@@ -126,9 +133,20 @@ class PerfJitLogger : public CodeEventLogger {
const char* name, int length) override {
UNIMPLEMENTED();
}
+
+ void LogRecordedBuffer(const InstructionStream* stream, const char* name,
+ int length) override {
+ UNIMPLEMENTED();
+ }
+
+ void LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+ int length) override {
+ UNIMPLEMENTED();
+ }
};
#endif // V8_OS_LINUX
} // namespace internal
} // namespace v8
-#endif
+
+#endif // V8_PERF_JIT_H_
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index 451a1afa46..166a854638 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -46,8 +46,7 @@
namespace v8 {
namespace internal {
-
-bool CpuFeatures::SupportsCrankshaft() { return true; }
+bool CpuFeatures::SupportsOptimizer() { return true; }
bool CpuFeatures::SupportsWasmSimd128() { return false; }
@@ -61,8 +60,8 @@ void RelocInfo::apply(intptr_t delta) {
// mov sequence
DCHECK(IsInternalReferenceEncoded(rmode_));
Address target = Assembler::target_address_at(pc_, constant_pool_);
- Assembler::set_target_address_at(nullptr, pc_, constant_pool_,
- target + delta, SKIP_ICACHE_FLUSH);
+ Assembler::set_target_address_at(pc_, constant_pool_, target + delta,
+ SKIP_ICACHE_FLUSH);
}
}
@@ -177,7 +176,7 @@ void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(target->GetIsolate(), pc_, constant_pool_,
+ Assembler::set_target_address_at(pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
@@ -199,15 +198,15 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
return target_address();
}
-void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
+void RelocInfo::set_target_runtime_entry(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target)
- set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
}
-void RelocInfo::WipeOut(Isolate* isolate) {
+void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
@@ -217,15 +216,15 @@ void RelocInfo::WipeOut(Isolate* isolate) {
} else if (IsInternalReferenceEncoded(rmode_)) {
// mov sequence
// Currently used only by deserializer, no need to flush.
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr,
+ Assembler::set_target_address_at(pc_, constant_pool_, nullptr,
SKIP_ICACHE_FLUSH);
} else {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr);
+ Assembler::set_target_address_at(pc_, constant_pool_, nullptr);
}
}
template <typename ObjectVisitor>
-void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);
@@ -241,7 +240,7 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
}
}
-Operand::Operand(Register rm) : rm_(rm), rmode_(kRelocInfo_NONEPTR) {}
+Operand::Operand(Register rm) : rm_(rm), rmode_(RelocInfo::NONE) {}
void Assembler::UntrackBranch() {
DCHECK(!trampoline_emitted_);
@@ -414,16 +413,15 @@ Address Assembler::target_constant_pool_address_at(
// has already deserialized the mov instructions etc.
// There is a FIXED_SEQUENCE assumption here
void Assembler::deserialization_set_special_target_at(
- Isolate* isolate, Address instruction_payload, Code* code, Address target) {
- set_target_address_at(isolate, instruction_payload,
+ Address instruction_payload, Code* code, Address target) {
+ set_target_address_at(instruction_payload,
code ? code->constant_pool() : nullptr, target);
}
-
void Assembler::deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
+ Address pc, Address target, RelocInfo::Mode mode) {
if (RelocInfo::IsInternalReferenceEncoded(mode)) {
- set_target_address_at(isolate, pc, nullptr, target, SKIP_ICACHE_FLUSH);
+ set_target_address_at(pc, nullptr, target, SKIP_ICACHE_FLUSH);
} else {
Memory::Address_at(pc) = target;
}
@@ -431,11 +429,9 @@ void Assembler::deserialization_set_target_internal_reference_at(
// This code assumes the FIXED_SEQUENCE of lis/ori
-void Assembler::set_target_address_at(Isolate* isolate, Address pc,
- Address constant_pool, Address target,
+void Assembler::set_target_address_at(Address pc, Address constant_pool,
+ Address target,
ICacheFlushMode icache_flush_mode) {
- DCHECK_IMPLIES(isolate == nullptr, icache_flush_mode == SKIP_ICACHE_FLUSH);
-
if (FLAG_enable_embedded_constant_pool && constant_pool) {
ConstantPoolEntry::Access access;
if (IsConstantPoolLoadStart(pc, &access)) {
@@ -477,7 +473,7 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
*(p + 3) = instr4;
*(p + 4) = instr5;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, p, 5 * kInstrSize);
+ Assembler::FlushICache(p, 5 * kInstrSize);
}
#else
uint32_t* p = reinterpret_cast<uint32_t*>(pc);
@@ -492,7 +488,7 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
*p = instr1;
*(p + 1) = instr2;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, p, 2 * kInstrSize);
+ Assembler::FlushICache(p, 2 * kInstrSize);
}
#endif
return;
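The hunks above drop the Isolate* parameter from the patching helpers and key the instruction-cache flush purely on the ICacheFlushMode argument, instead of passing a null isolate to mean "skip the flush". A minimal standalone sketch of that shape (illustrative names and signatures, not the real V8 API):

#include <cstddef>
#include <cstdint>
#include <cstring>

enum ICacheFlushMode { FLUSH_ICACHE_IF_NEEDED, SKIP_ICACHE_FLUSH };

// Stand-in for the host-specific instruction-cache flush.
static void FlushICache(void* start, std::size_t size) {
  (void)start;
  (void)size;
}

static void set_target_address_at(uint8_t* pc, uint64_t target,
                                  ICacheFlushMode mode = FLUSH_ICACHE_IF_NEEDED) {
  std::memcpy(pc, &target, sizeof(target));  // rewrite the embedded constant
  if (mode != SKIP_ICACHE_FLUSH) FlushICache(pc, sizeof(target));
}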
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index 90b18b02ba..eb16e46505 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -170,22 +170,20 @@ uint32_t RelocInfo::embedded_size() const {
Assembler::target_address_at(pc_, constant_pool_)));
}
-void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
+void RelocInfo::set_embedded_address(Address address,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
- flush_mode);
+ Assembler::set_target_address_at(pc_, constant_pool_, address, flush_mode);
}
-void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
- ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_,
+void RelocInfo::set_embedded_size(uint32_t size, ICacheFlushMode flush_mode) {
+ Assembler::set_target_address_at(pc_, constant_pool_,
reinterpret_cast<Address>(size), flush_mode);
}
-void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
+void RelocInfo::set_js_to_wasm_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- set_embedded_address(isolate, address, icache_flush_mode);
+ set_embedded_address(address, icache_flush_mode);
}
Address RelocInfo::js_to_wasm_address() const {
@@ -240,7 +238,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
}
Address pc = buffer_ + request.offset();
Address constant_pool = nullptr;
- set_target_address_at(nullptr, pc, constant_pool,
+ set_target_address_at(pc, constant_pool,
reinterpret_cast<Address>(object.location()),
SKIP_ICACHE_FLUSH);
}
@@ -2093,8 +2091,7 @@ void Assembler::EmitRelocations() {
} else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
// mov sequence
intptr_t pos = reinterpret_cast<intptr_t>(target_address_at(pc, nullptr));
- set_target_address_at(nullptr, pc, nullptr, buffer_ + pos,
- SKIP_ICACHE_FLUSH);
+ set_target_address_at(pc, nullptr, buffer_ + pos, SKIP_ICACHE_FLUSH);
}
reloc_info_writer.Write(&rinfo);
@@ -2150,10 +2147,6 @@ PatchingAssembler::~PatchingAssembler() {
DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_);
}
-void PatchingAssembler::FlushICache(Isolate* isolate) {
- Assembler::FlushICache(isolate, buffer_, buffer_size_ - kGap);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index 0204d65fa5..271c6e69db 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -373,18 +373,12 @@ C_REGISTERS(DECLARE_C_REGISTER)
// -----------------------------------------------------------------------------
// Machine instruction Operands
-#if V8_TARGET_ARCH_PPC64
-constexpr RelocInfo::Mode kRelocInfo_NONEPTR = RelocInfo::NONE64;
-#else
-constexpr RelocInfo::Mode kRelocInfo_NONEPTR = RelocInfo::NONE32;
-#endif
-
// Class Operand represents a shifter operand in data processing instructions
class Operand BASE_EMBEDDED {
public:
// immediate
INLINE(explicit Operand(intptr_t immediate,
- RelocInfo::Mode rmode = kRelocInfo_NONEPTR)
+ RelocInfo::Mode rmode = RelocInfo::NONE)
: rmode_(rmode)) {
value_.immediate = immediate;
}
@@ -394,7 +388,7 @@ class Operand BASE_EMBEDDED {
value_.immediate = reinterpret_cast<intptr_t>(f.address());
}
explicit Operand(Handle<HeapObject> handle);
- INLINE(explicit Operand(Smi* value) : rmode_(kRelocInfo_NONEPTR)) {
+ INLINE(explicit Operand(Smi* value) : rmode_(RelocInfo::NONE)) {
value_.immediate = reinterpret_cast<intptr_t>(value);
}
// rm
@@ -581,7 +575,7 @@ class Assembler : public AssemblerBase {
// The isolate argument is unused (and may be nullptr) when skipping flushing.
INLINE(static Address target_address_at(Address pc, Address constant_pool));
INLINE(static void set_target_address_at(
- Isolate* isolate, Address pc, Address constant_pool, Address target,
+ Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
// Return the code target address at a call site from the return address
@@ -595,12 +589,11 @@ class Assembler : public AssemblerBase {
// This sets the branch destination.
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Isolate* isolate, Address instruction_payload, Code* code,
- Address target);
+ Address instruction_payload, Code* code, Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target,
+ Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// Size of an instruction.
@@ -1664,8 +1657,6 @@ class PatchingAssembler : public Assembler {
public:
PatchingAssembler(IsolateData isolate_data, byte* address, int instructions);
~PatchingAssembler();
-
- void FlushICache(Isolate* isolate);
};
} // namespace internal
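With RelocInfo::NONE32 and NONE64 collapsed into a single RelocInfo::NONE, the per-pointer-width kRelocInfo_NONEPTR alias removed above becomes unnecessary. A toy sketch of the resulting default (stand-in types, not the real RelocInfo/Operand):

#include <cstdint>

struct RelocInfo {
  enum Mode { NONE, CODE_TARGET, EMBEDDED_OBJECT };
};

class Operand {
 public:
  // One NONE value works for both 32-bit and 64-bit targets, so no
  // architecture-specific alias is needed for the default argument.
  explicit Operand(intptr_t immediate, RelocInfo::Mode rmode = RelocInfo::NONE)
      : immediate_(immediate), rmode_(rmode) {}

 private:
  intptr_t immediate_;
  RelocInfo::Mode rmode_;
};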
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index 5c3d38786f..742d89a590 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -449,6 +449,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&skip);
+ // Reset the masking register.
+ if (FLAG_branch_load_poisoning) {
+ __ ResetSpeculationPoisonRegister();
+ }
+
// Compute the handler entry address and jump to it.
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
__ mov(ip, Operand(pending_handler_entrypoint_address));
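The CEntryStub change re-arms the Spectre mitigation mask before jumping to the exception handler when --branch-load-poisoning is enabled. Conceptually, the poison register holds all-ones on the architecturally correct path and zero on a misspeculated one, and speculative loads are ANDed with it; a plain C++ sketch of that idea (not the generated PPC code):

#include <cstdint>

// Stand-in for kSpeculationPoisonRegister: ~0 on the correct path.
static uint64_t speculation_poison = ~uint64_t{0};

inline uint64_t PoisonedLoad(const uint64_t* slot) {
  return *slot & speculation_poison;  // yields 0 on a poisoned (misspeculated) path
}

inline void ResetSpeculationPoison() {
  speculation_poison = ~uint64_t{0};  // equivalent of "mov kSpeculationPoisonRegister, -1"
}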
diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc
index 4641dc260c..b54a44c6ed 100644
--- a/deps/v8/src/ppc/codegen-ppc.cc
+++ b/deps/v8/src/ppc/codegen-ppc.cc
@@ -36,10 +36,9 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(ABI_USES_FUNCTION_DESCRIPTORS ||
- !RelocInfo::RequiresRelocation(isolate, desc));
+ DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICache(isolate, buffer, allocated);
+ Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
diff --git a/deps/v8/src/ppc/disasm-ppc.cc b/deps/v8/src/ppc/disasm-ppc.cc
index 7e962e7849..5564fd9c32 100644
--- a/deps/v8/src/ppc/disasm-ppc.cc
+++ b/deps/v8/src/ppc/disasm-ppc.cc
@@ -269,6 +269,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + off));
return 8;
}
+ break;
case 's': {
DCHECK_EQ(format[1], 'h');
int32_t value = 0;
diff --git a/deps/v8/src/ppc/frame-constants-ppc.h b/deps/v8/src/ppc/frame-constants-ppc.h
index ee7f29937b..c822de877b 100644
--- a/deps/v8/src/ppc/frame-constants-ppc.h
+++ b/deps/v8/src/ppc/frame-constants-ppc.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PPC_FRAMES_PPC_H_
-#define V8_PPC_FRAMES_PPC_H_
+#ifndef V8_PPC_FRAME_CONSTANTS_PPC_H_
+#define V8_PPC_FRAME_CONSTANTS_PPC_H_
#include "src/frame-constants.h"
@@ -47,4 +47,4 @@ class JavaScriptFrameConstants : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_PPC_FRAMES_PPC_H_
+#endif // V8_PPC_FRAME_CONSTANTS_PPC_H_
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index 069fcb26ad..23245b153b 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -67,12 +67,6 @@ const Register MathPowIntegerDescriptor::exponent() {
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r3; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r6; }
-void FastNewClosureDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r4, r5, r6};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return r3; }
@@ -290,8 +284,8 @@ void ApiCallbackDescriptor::InitializePlatformSpecific(
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
- kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index 8d7c3d05b4..68efa84c72 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -15,6 +15,7 @@
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
+#include "src/instruction-stream.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
@@ -832,40 +833,28 @@ void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
add(kConstantPoolRegister, kConstantPoolRegister, code_target_address);
}
-void TurboAssembler::LoadConstantPoolPointerRegister(Register base,
- int code_start_delta) {
- add_label_offset(kConstantPoolRegister, base, ConstantPoolPosition(),
- code_start_delta);
-}
-
void TurboAssembler::LoadConstantPoolPointerRegister() {
mov_label_addr(kConstantPoolRegister, ConstantPoolPosition());
}
-void TurboAssembler::StubPrologue(StackFrame::Type type, Register base,
- int prologue_offset) {
+void TurboAssembler::StubPrologue(StackFrame::Type type) {
{
ConstantPoolUnavailableScope constant_pool_unavailable(this);
mov(r11, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(r11);
}
if (FLAG_enable_embedded_constant_pool) {
- if (base != no_reg) {
- // base contains prologue address
- LoadConstantPoolPointerRegister(base, -prologue_offset);
- } else {
- LoadConstantPoolPointerRegister();
- }
+ LoadConstantPoolPointerRegister();
set_constant_pool_available(true);
}
}
-void TurboAssembler::Prologue(Register base, int prologue_offset) {
+void TurboAssembler::Prologue() {
-  DCHECK(base != no_reg);
PushStandardFrame(r4);
if (FLAG_enable_embedded_constant_pool) {
// base contains prologue address
- LoadConstantPoolPointerRegister(base, -prologue_offset);
+ LoadConstantPoolPointerRegister();
set_constant_pool_available(true);
}
}
@@ -1189,14 +1178,33 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
- Label skip_hook;
+ Label skip_hook, call_hook;
+
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(isolate());
+ mov(r7, Operand(debug_is_active));
+ LoadByte(r7, MemOperand(r7), r0);
+ extsb(r7, r7);
+ CmpSmiLiteral(r7, Smi::kZero, r0);
+ beq(&skip_hook);
+
ExternalReference debug_hook_avtive =
ExternalReference::debug_hook_on_function_call_address(isolate());
mov(r7, Operand(debug_hook_avtive));
LoadByte(r7, MemOperand(r7), r0);
extsb(r7, r7);
CmpSmiLiteral(r7, Smi::kZero, r0);
- beq(&skip_hook);
+ bne(&call_hook);
+
+ LoadP(r7, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
+ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kDebugInfoOffset));
+ JumpIfSmi(r7, &skip_hook);
+ LoadP(r7, FieldMemOperand(r7, DebugInfo::kFlagsOffset));
+ SmiUntag(r0, r7);
+ andi(r0, r0, Operand(DebugInfo::kBreakAtEntry));
+ beq(&skip_hook, cr0);
+
+ bind(&call_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -1253,7 +1261,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
- Register code = ip;
+ Register code = kJavaScriptCallCodeStartRegister;
LoadP(code, FieldMemOperand(function, JSFunction::kCodeOffset));
addi(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
@@ -1307,14 +1315,6 @@ void MacroAssembler::InvokeFunction(Register function,
InvokeFunctionCode(r4, no_reg, expected, actual, flag);
}
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag) {
- Move(r4, function);
- InvokeFunction(r4, expected, actual, flag);
-}
-
void MacroAssembler::MaybeDropFrames() {
// Check whether we need to drop frames to restart a function on the stack.
ExternalReference restart_fp =
@@ -1626,6 +1626,11 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
+void MacroAssembler::JumpToInstructionStream(const InstructionStream* stream) {
+ intptr_t bytes_address = reinterpret_cast<intptr_t>(stream->bytes());
+ mov(kOffHeapTrampolineRegister, Operand(bytes_address, RelocInfo::NONE));
+ Jump(kOffHeapTrampolineRegister);
+}
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
@@ -1667,7 +1672,7 @@ void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
#ifdef DEBUG
- const char* msg = GetBailoutReason(reason);
+ const char* msg = GetAbortReason(reason);
if (msg != nullptr) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -2922,6 +2927,9 @@ bool AreAliased(DoubleRegister reg1, DoubleRegister reg2, DoubleRegister reg3,
}
#endif
+void TurboAssembler::ResetSpeculationPoisonRegister() {
+ mov(kSpeculationPoisonRegister, Operand(-1));
+}
} // namespace internal
} // namespace v8
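The CheckDebugHook rewrite above layers three tests: bail out fast when the debugger is inactive, call the hook when the global hook-on-function-call flag is set, and otherwise consult the callee's DebugInfo for the break-at-entry bit. A hedged C++ restatement of that decision (the helper types here are assumptions, not the MacroAssembler code):

struct DebugInfoLike {
  int flags;
  static constexpr int kBreakAtEntry = 1;
};

bool ShouldCallDebugHook(bool debug_is_active, bool hook_on_function_call,
                         const DebugInfoLike* debug_info) {
  if (!debug_is_active) return false;      // fast path: debugger is off
  if (hook_on_function_call) return true;  // global "hook every call" flag
  return debug_info != nullptr &&          // per-function break-at-entry request
         (debug_info->flags & DebugInfoLike::kBreakAtEntry) != 0;
}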
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index c67ef4ab90..f4d9afd47f 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -15,20 +15,23 @@ namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
-const Register kReturnRegister0 = r3;
-const Register kReturnRegister1 = r4;
-const Register kReturnRegister2 = r5;
-const Register kJSFunctionRegister = r4;
-const Register kContextRegister = r30;
-const Register kAllocateSizeRegister = r4;
-const Register kInterpreterAccumulatorRegister = r3;
-const Register kInterpreterBytecodeOffsetRegister = r15;
-const Register kInterpreterBytecodeArrayRegister = r16;
-const Register kInterpreterDispatchTableRegister = r17;
-const Register kJavaScriptCallArgCountRegister = r3;
-const Register kJavaScriptCallNewTargetRegister = r6;
-const Register kRuntimeCallFunctionRegister = r4;
-const Register kRuntimeCallArgCountRegister = r3;
+constexpr Register kReturnRegister0 = r3;
+constexpr Register kReturnRegister1 = r4;
+constexpr Register kReturnRegister2 = r5;
+constexpr Register kJSFunctionRegister = r4;
+constexpr Register kContextRegister = r30;
+constexpr Register kAllocateSizeRegister = r4;
+constexpr Register kSpeculationPoisonRegister = r14;
+constexpr Register kInterpreterAccumulatorRegister = r3;
+constexpr Register kInterpreterBytecodeOffsetRegister = r15;
+constexpr Register kInterpreterBytecodeArrayRegister = r16;
+constexpr Register kInterpreterDispatchTableRegister = r17;
+constexpr Register kJavaScriptCallArgCountRegister = r3;
+constexpr Register kJavaScriptCallNewTargetRegister = r6;
+constexpr Register kJavaScriptCallCodeStartRegister = r5;
+constexpr Register kOffHeapTrampolineRegister = ip;
+constexpr Register kRuntimeCallFunctionRegister = r4;
+constexpr Register kRuntimeCallArgCountRegister = r3;
// ----------------------------------------------------------------------------
// Static helper functions
@@ -172,9 +175,8 @@ class TurboAssembler : public Assembler {
void PushCommonFrame(Register marker_reg = no_reg);
// Generates function and stub prologue code.
- void StubPrologue(StackFrame::Type type, Register base = no_reg,
- int prologue_offset = 0);
- void Prologue(Register base, int prologue_offset = 0);
+ void StubPrologue(StackFrame::Type type);
+ void Prologue();
// Push a standard frame, consisting of lr, fp, constant pool,
// context and JS function
@@ -639,7 +641,6 @@ class TurboAssembler : public Assembler {
void CallStubDelayed(CodeStub* stub);
void LoadConstantPoolPointerRegister();
- void LoadConstantPoolPointerRegister(Register base, int code_entry_delta = 0);
void AbortConstantPoolBuilding() {
#ifdef DEBUG
// Avoid DCHECK(!is_linked()) failure in ~Label()
@@ -647,6 +648,8 @@ class TurboAssembler : public Assembler {
#endif
}
+ void ResetSpeculationPoisonRegister();
+
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
@@ -829,10 +832,6 @@ class MacroAssembler : public TurboAssembler {
void InvokeFunction(Register function, const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
- void InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag);
-
void DebugBreak();
// Frame restart support
void MaybeDropFrames();
@@ -933,6 +932,9 @@ class MacroAssembler : public TurboAssembler {
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
+ // Generates a trampoline to jump to the off-heap instruction stream.
+ void JumpToInstructionStream(const InstructionStream* stream);
+
// ---------------------------------------------------------------------------
// StatsCounter support
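Promoting the calling-convention aliases from const to constexpr (and adding kSpeculationPoisonRegister, kJavaScriptCallCodeStartRegister and kOffHeapTrampolineRegister) lets them be used in constant expressions with no runtime initialization. A small sketch with a stand-in Register type:

struct Register { int code; };

constexpr Register r3{3};
constexpr Register kReturnRegister0 = r3;  // usable at compile time
static_assert(kReturnRegister0.code == 3, "alias is a constant expression");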
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
index a92e5363ea..6c517038bb 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -639,8 +639,7 @@ void PPCDebugger::Debug() {
#undef XSTR
}
-
-static bool ICacheMatch(void* one, void* two) {
+bool Simulator::ICacheMatch(void* one, void* two) {
DCHECK_EQ(reinterpret_cast<intptr_t>(one) & CachePage::kPageMask, 0);
DCHECK_EQ(reinterpret_cast<intptr_t>(two) & CachePage::kPageMask, 0);
return one == two;
@@ -738,11 +737,6 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
- i_cache_ = isolate_->simulator_i_cache();
- if (i_cache_ == nullptr) {
- i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
- isolate_->set_simulator_i_cache(i_cache_);
- }
// Set up simulator support first. Some of this information is needed to
// setup the architecture state.
#if V8_TARGET_ARCH_PPC64
@@ -2161,50 +2155,50 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
}
break;
}
- case STFSUX: {
- case STFSX:
- int frs = instr->RSValue();
- int ra = instr->RAValue();
- int rb = instr->RBValue();
- intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
- intptr_t rb_val = get_register(rb);
- float frs_val = static_cast<float>(get_double_from_d_register(frs));
- int32_t* p = reinterpret_cast<int32_t*>(&frs_val);
+ case STFSUX: V8_FALLTHROUGH;
+ case STFSX: {
+ int frs = instr->RSValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ intptr_t rb_val = get_register(rb);
+ float frs_val = static_cast<float>(get_double_from_d_register(frs));
+ int32_t* p = reinterpret_cast<int32_t*>(&frs_val);
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
- // Conversion using double changes sNan to qNan on ia32/x64
- int32_t sval = 0;
- int64_t dval = get_d_register(frs);
- if ((dval & 0x7FF0000000000000) == 0x7FF0000000000000) {
- sval = ((dval & 0xC000000000000000) >> 32) |
- ((dval & 0x07FFFFFFE0000000) >> 29);
- p = &sval;
- } else {
- p = reinterpret_cast<int32_t*>(&frs_val);
- }
-#else
+ // Conversion using double changes sNan to qNan on ia32/x64
+ int32_t sval = 0;
+ int64_t dval = get_d_register(frs);
+ if ((dval & 0x7FF0000000000000) == 0x7FF0000000000000) {
+ sval = ((dval & 0xC000000000000000) >> 32) |
+ ((dval & 0x07FFFFFFE0000000) >> 29);
+ p = &sval;
+ } else {
p = reinterpret_cast<int32_t*>(&frs_val);
+ }
+#else
+ p = reinterpret_cast<int32_t*>(&frs_val);
#endif
- WriteW(ra_val + rb_val, *p, instr);
- if (opcode == STFSUX) {
- DCHECK_NE(ra, 0);
- set_register(ra, ra_val + rb_val);
- }
- break;
+ WriteW(ra_val + rb_val, *p, instr);
+ if (opcode == STFSUX) {
+ DCHECK_NE(ra, 0);
+ set_register(ra, ra_val + rb_val);
+ }
+ break;
}
- case STFDUX: {
- case STFDX:
- int frs = instr->RSValue();
- int ra = instr->RAValue();
- int rb = instr->RBValue();
- intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
- intptr_t rb_val = get_register(rb);
- int64_t frs_val = get_d_register(frs);
- WriteDW(ra_val + rb_val, frs_val);
- if (opcode == STFDUX) {
- DCHECK_NE(ra, 0);
- set_register(ra, ra_val + rb_val);
- }
- break;
+ case STFDUX: V8_FALLTHROUGH;
+ case STFDX: {
+ int frs = instr->RSValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ intptr_t rb_val = get_register(rb);
+ int64_t frs_val = get_d_register(frs);
+ WriteDW(ra_val + rb_val, frs_val);
+ if (opcode == STFDUX) {
+ DCHECK_NE(ra, 0);
+ set_register(ra, ra_val + rb_val);
+ }
+ break;
}
case POPCNTW: {
int rs = instr->RSValue();
@@ -3220,36 +3214,35 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
break;
}
- case STFSU: {
- case STFS:
- int frs = instr->RSValue();
- int ra = instr->RAValue();
- int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
- intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
- float frs_val = static_cast<float>(get_double_from_d_register(frs));
- int32_t* p;
+ case STFSU: V8_FALLTHROUGH;
+ case STFS: {
+ int frs = instr->RSValue();
+ int ra = instr->RAValue();
+ int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ float frs_val = static_cast<float>(get_double_from_d_register(frs));
+ int32_t* p;
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
- // Conversion using double changes sNan to qNan on ia32/x64
- int32_t sval = 0;
- int64_t dval = get_d_register(frs);
- if ((dval & 0x7FF0000000000000) == 0x7FF0000000000000) {
- sval = ((dval & 0xC000000000000000) >> 32) |
- ((dval & 0x07FFFFFFE0000000) >> 29);
- p = &sval;
- } else {
- p = reinterpret_cast<int32_t*>(&frs_val);
- }
-#else
+ // Conversion using double changes sNan to qNan on ia32/x64
+ int32_t sval = 0;
+ int64_t dval = get_d_register(frs);
+ if ((dval & 0x7FF0000000000000) == 0x7FF0000000000000) {
+ sval = ((dval & 0xC000000000000000) >> 32) |
+ ((dval & 0x07FFFFFFE0000000) >> 29);
+ p = &sval;
+ } else {
p = reinterpret_cast<int32_t*>(&frs_val);
+ }
+#else
+ p = reinterpret_cast<int32_t*>(&frs_val);
#endif
- WriteW(ra_val + offset, *p, instr);
- if (opcode == STFSU) {
- DCHECK_NE(ra, 0);
- set_register(ra, ra_val + offset);
- }
- break;
+ WriteW(ra_val + offset, *p, instr);
+ if (opcode == STFSU) {
+ DCHECK_NE(ra, 0);
+ set_register(ra, ra_val + offset);
+ }
+ break;
}
-
case STFDU:
case STFD: {
int frs = instr->RSValue();
@@ -3916,7 +3909,7 @@ void Simulator::Trace(Instruction* instr) {
// Executes the current instruction.
void Simulator::ExecuteInstruction(Instruction* instr) {
if (v8::internal::FLAG_check_icache) {
- CheckICache(isolate_->simulator_i_cache(), instr);
+ CheckICache(i_cache(), instr);
}
pc_modified_ = false;
if (::v8::internal::FLAG_trace_sim) {
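The simulator hunks re-nest the STFS/STFSU and STFD/STFDU cases so the update-form opcode explicitly falls through into the shared store body instead of nesting one case label inside the other's braces. A standalone sketch using the standard C++17 [[fallthrough]] attribute in place of the V8_FALLTHROUGH macro:

enum Op { STFS, STFSU };

void Store(Op opcode, long& ra_val, long offset, float value, float* slot) {
  switch (opcode) {
    case STFSU:
      [[fallthrough]];
    case STFS:
      *slot = value;                          // store body shared by both forms
      if (opcode == STFSU) ra_val += offset;  // update form also writes back the EA
      break;
  }
}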
diff --git a/deps/v8/src/ppc/simulator-ppc.h b/deps/v8/src/ppc/simulator-ppc.h
index 544b9d463e..45b350b742 100644
--- a/deps/v8/src/ppc/simulator-ppc.h
+++ b/deps/v8/src/ppc/simulator-ppc.h
@@ -190,6 +190,7 @@ class Simulator : public SimulatorBase {
static void SetRedirectInstruction(Instruction* instruction);
// ICache checking.
+ static bool ICacheMatch(void* one, void* two);
static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
size_t size);
@@ -328,9 +329,6 @@ class Simulator : public SimulatorBase {
// Debugger input.
char* last_debugger_input_;
- // Icache simulation
- base::CustomMatcherHashMap* i_cache_;
-
// Registered breakpoints.
Instruction* break_pc_;
Instr break_instr_;
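Dropping the per-instance i_cache_ field goes hand in hand with the new static Simulator::ICacheMatch: the icache validity map is now owned by the simulator machinery itself rather than being created lazily and parked on the Isolate. A rough sketch of that ownership shape (std::unordered_map stands in for the CustomMatcherHashMap):

#include <unordered_map>

class SimulatorSketch {
 public:
  // Page keys are compared by identity in this sketch.
  static bool ICacheMatch(void* one, void* two) { return one == two; }

  static std::unordered_map<void*, bool>& i_cache() {
    static std::unordered_map<void*, bool> cache;  // one cache shared by all simulators
    return cache;
  }
};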
diff --git a/deps/v8/src/profiler/allocation-tracker.cc b/deps/v8/src/profiler/allocation-tracker.cc
index 8ee4527234..b2b9392319 100644
--- a/deps/v8/src/profiler/allocation-tracker.cc
+++ b/deps/v8/src/profiler/allocation-tracker.cc
@@ -5,6 +5,7 @@
#include "src/profiler/allocation-tracker.h"
#include "src/frames-inl.h"
+#include "src/global-handles.h"
#include "src/objects-inl.h"
#include "src/profiler/heap-snapshot-generator-inl.h"
diff --git a/deps/v8/src/profiler/allocation-tracker.h b/deps/v8/src/profiler/allocation-tracker.h
index a84fd4a8fd..cd9e120db2 100644
--- a/deps/v8/src/profiler/allocation-tracker.h
+++ b/deps/v8/src/profiler/allocation-tracker.h
@@ -154,4 +154,4 @@ class AllocationTracker {
} // namespace internal
} // namespace v8
-#endif // V8_ALLOCATION_TRACKER_H_
+#endif // V8_PROFILER_ALLOCATION_TRACKER_H_
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index a915ebd511..841ce6000f 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -208,7 +208,7 @@ int CpuProfiler::GetProfilesCount() {
CpuProfile* CpuProfiler::GetProfile(int index) {
- return profiles_->profiles()->at(index);
+ return profiles_->profiles()->at(index).get();
}
@@ -220,7 +220,6 @@ void CpuProfiler::DeleteAllProfiles() {
void CpuProfiler::DeleteProfile(CpuProfile* profile) {
profiles_->RemoveProfile(profile);
- delete profile;
if (profiles_->profiles()->empty() && !is_profiling_) {
// If this was the last profile, clean up all accessory data as well.
ResetProfiles();
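GetProfile now returns .get() and DeleteProfile no longer deletes manually because the profile list owns its entries through std::unique_ptr. A self-contained sketch of that pattern (Profile is a stand-in type):

#include <algorithm>
#include <memory>
#include <vector>

struct Profile {};

std::vector<std::unique_ptr<Profile>> profiles;

Profile* GetProfile(std::size_t index) { return profiles.at(index).get(); }

void RemoveProfile(Profile* profile) {
  profiles.erase(std::find_if(profiles.begin(), profiles.end(),
                              [&](const std::unique_ptr<Profile>& entry) {
                                return entry.get() == profile;
                              }));  // erasing the unique_ptr also frees the profile
}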
diff --git a/deps/v8/src/profiler/heap-profiler.cc b/deps/v8/src/profiler/heap-profiler.cc
index 8f0afdc771..9dbe3ff5bd 100644
--- a/deps/v8/src/profiler/heap-profiler.cc
+++ b/deps/v8/src/profiler/heap-profiler.cc
@@ -17,28 +17,22 @@ namespace internal {
HeapProfiler::HeapProfiler(Heap* heap)
: ids_(new HeapObjectsMap(heap)),
names_(new StringsStorage(heap)),
- is_tracking_object_moves_(false),
- get_retainer_infos_callback_(nullptr) {}
-
-static void DeleteHeapSnapshot(HeapSnapshot* snapshot_ptr) {
- delete snapshot_ptr;
-}
-
-
-HeapProfiler::~HeapProfiler() {
- std::for_each(snapshots_.begin(), snapshots_.end(), &DeleteHeapSnapshot);
-}
+ is_tracking_object_moves_(false) {}
+HeapProfiler::~HeapProfiler() = default;
void HeapProfiler::DeleteAllSnapshots() {
- std::for_each(snapshots_.begin(), snapshots_.end(), &DeleteHeapSnapshot);
snapshots_.clear();
names_.reset(new StringsStorage(heap()));
}
void HeapProfiler::RemoveSnapshot(HeapSnapshot* snapshot) {
- snapshots_.erase(std::find(snapshots_.begin(), snapshots_.end(), snapshot));
+ snapshots_.erase(
+ std::find_if(snapshots_.begin(), snapshots_.end(),
+ [&](const std::unique_ptr<HeapSnapshot>& entry) {
+ return entry.get() == snapshot;
+ }));
}
@@ -75,6 +69,18 @@ v8::HeapProfiler::RetainerInfos HeapProfiler::GetRetainerInfos(
return infos;
}
+void HeapProfiler::SetBuildEmbedderGraphCallback(
+ v8::HeapProfiler::BuildEmbedderGraphCallback callback) {
+ build_embedder_graph_callback_ = callback;
+}
+
+void HeapProfiler::BuildEmbedderGraph(Isolate* isolate,
+ v8::EmbedderGraph* graph) {
+ if (build_embedder_graph_callback_ != nullptr)
+ build_embedder_graph_callback_(reinterpret_cast<v8::Isolate*>(isolate),
+ graph);
+}
+
HeapSnapshot* HeapProfiler::TakeSnapshot(
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver) {
@@ -85,7 +91,7 @@ HeapSnapshot* HeapProfiler::TakeSnapshot(
delete result;
result = nullptr;
} else {
- snapshots_.push_back(result);
+ snapshots_.emplace_back(result);
}
}
ids_->RemoveDeadEntries();
@@ -153,7 +159,7 @@ int HeapProfiler::GetSnapshotsCount() {
}
HeapSnapshot* HeapProfiler::GetSnapshot(int index) {
- return snapshots_.at(index);
+ return snapshots_.at(index).get();
}
SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Object> obj) {
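Alongside the unique_ptr-owned snapshot list, the profiler gains an optional BuildEmbedderGraph callback that is only invoked when an embedder registered one. A minimal sketch of the hook plumbing with stand-in types (not the v8::HeapProfiler API itself):

struct EmbedderGraph {};   // stand-in for v8::EmbedderGraph
struct IsolateHandle {};   // stand-in for v8::Isolate

using BuildEmbedderGraphCallback = void (*)(IsolateHandle*, EmbedderGraph*);

class ProfilerHooks {
 public:
  void SetBuildEmbedderGraphCallback(BuildEmbedderGraphCallback callback) {
    build_embedder_graph_callback_ = callback;
  }
  bool HasBuildEmbedderGraphCallback() const {
    return build_embedder_graph_callback_ != nullptr;
  }
  void BuildEmbedderGraph(IsolateHandle* isolate, EmbedderGraph* graph) {
    if (build_embedder_graph_callback_ != nullptr)
      build_embedder_graph_callback_(isolate, graph);  // embedder fills in its nodes/edges
  }

 private:
  BuildEmbedderGraphCallback build_embedder_graph_callback_ = nullptr;
};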
diff --git a/deps/v8/src/profiler/heap-profiler.h b/deps/v8/src/profiler/heap-profiler.h
index da6814ddcb..d37a882805 100644
--- a/deps/v8/src/profiler/heap-profiler.h
+++ b/deps/v8/src/profiler/heap-profiler.h
@@ -8,7 +8,11 @@
#include <memory>
#include <vector>
-#include "src/isolate.h"
+#include "include/v8-profiler.h"
+#include "src/base/platform/mutex.h"
+#include "src/debug/debug-interface.h"
+#include "src/globals.h"
+#include "src/heap/heap.h"
namespace v8 {
namespace internal {
@@ -65,9 +69,15 @@ class HeapProfiler {
void SetGetRetainerInfosCallback(
v8::HeapProfiler::GetRetainerInfosCallback callback);
-
v8::HeapProfiler::RetainerInfos GetRetainerInfos(Isolate* isolate);
+ void SetBuildEmbedderGraphCallback(
+ v8::HeapProfiler::BuildEmbedderGraphCallback callback);
+ void BuildEmbedderGraph(Isolate* isolate, v8::EmbedderGraph* graph);
+ bool HasBuildEmbedderGraphCallback() {
+ return build_embedder_graph_callback_ != nullptr;
+ }
+
bool is_tracking_object_moves() const { return is_tracking_object_moves_; }
bool is_tracking_allocations() const { return !!allocation_tracker_; }
@@ -85,14 +95,17 @@ class HeapProfiler {
// Mapping from HeapObject addresses to objects' uids.
std::unique_ptr<HeapObjectsMap> ids_;
- std::vector<HeapSnapshot*> snapshots_;
+ std::vector<std::unique_ptr<HeapSnapshot>> snapshots_;
std::unique_ptr<StringsStorage> names_;
std::vector<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;
std::unique_ptr<AllocationTracker> allocation_tracker_;
bool is_tracking_object_moves_;
base::Mutex profiler_mutex_;
std::unique_ptr<SamplingHeapProfiler> sampling_heap_profiler_;
- v8::HeapProfiler::GetRetainerInfosCallback get_retainer_infos_callback_;
+ v8::HeapProfiler::GetRetainerInfosCallback get_retainer_infos_callback_ =
+ nullptr;
+ v8::HeapProfiler::BuildEmbedderGraphCallback build_embedder_graph_callback_ =
+ nullptr;
DISALLOW_COPY_AND_ASSIGN(HeapProfiler);
};
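The header side now leans on in-class default member initializers for the two callback pointers and lets the snapshot vector own its elements directly. A compressed illustration, trimmed to the members relevant here:

#include <memory>
#include <vector>

struct HeapSnapshotLike {};
using Callback = void (*)();

struct ProfilerState {
  std::vector<std::unique_ptr<HeapSnapshotLike>> snapshots_;  // owns the snapshots
  Callback get_retainer_infos_callback_ = nullptr;            // in-class default init
  Callback build_embedder_graph_callback_ = nullptr;          // no constructor edits needed
};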
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index 40779d9e5f..b1e033c5f5 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -10,6 +10,7 @@
#include "src/code-stubs.h"
#include "src/conversions.h"
#include "src/debug/debug.h"
+#include "src/global-handles.h"
#include "src/layout-descriptor.h"
#include "src/objects-body-descriptors.h"
#include "src/objects-inl.h"
@@ -176,7 +177,7 @@ HeapSnapshot::HeapSnapshot(HeapProfiler* profiler)
((kPointerSize == 8) && (sizeof(HeapGraphEdge) == 24)));
STATIC_ASSERT(((kPointerSize == 4) && (sizeof(HeapEntry) == 28)) ||
((kPointerSize == 8) && (sizeof(HeapEntry) == 40)));
- for (int i = 0; i < VisitorSynchronization::kNumberOfSyncTags; ++i) {
+ for (int i = 0; i < static_cast<int>(Root::kNumberOfRoots); ++i) {
gc_subroot_indexes_[i] = HeapEntry::kNoEntry;
}
}
@@ -184,7 +185,6 @@ HeapSnapshot::HeapSnapshot(HeapProfiler* profiler)
void HeapSnapshot::Delete() {
profiler_->RemoveSnapshot(this);
- delete this;
}
@@ -197,8 +197,8 @@ void HeapSnapshot::AddSyntheticRootEntries() {
AddRootEntry();
AddGcRootsEntry();
SnapshotObjectId id = HeapObjectsMap::kGcRootsFirstSubrootId;
- for (int tag = 0; tag < VisitorSynchronization::kNumberOfSyncTags; tag++) {
- AddGcSubrootEntry(tag, id);
+ for (int root = 0; root < static_cast<int>(Root::kNumberOfRoots); root++) {
+ AddGcSubrootEntry(static_cast<Root>(root), id);
id += HeapObjectsMap::kObjectIdStep;
}
DCHECK_EQ(HeapObjectsMap::kFirstAvailableObjectId, id);
@@ -230,13 +230,11 @@ HeapEntry* HeapSnapshot::AddGcRootsEntry() {
return entry;
}
-
-HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag, SnapshotObjectId id) {
- DCHECK_EQ(gc_subroot_indexes_[tag], HeapEntry::kNoEntry);
- DCHECK(0 <= tag && tag < VisitorSynchronization::kNumberOfSyncTags);
- HeapEntry* entry = AddEntry(HeapEntry::kSynthetic,
- VisitorSynchronization::kTagNames[tag], id, 0, 0);
- gc_subroot_indexes_[tag] = entry->index();
+HeapEntry* HeapSnapshot::AddGcSubrootEntry(Root root, SnapshotObjectId id) {
+ DCHECK_EQ(gc_subroot_indexes_[static_cast<int>(root)], HeapEntry::kNoEntry);
+ HeapEntry* entry =
+ AddEntry(HeapEntry::kSynthetic, RootVisitor::RootName(root), id, 0, 0);
+ gc_subroot_indexes_[static_cast<int>(root)] = entry->index();
return entry;
}
@@ -307,7 +305,7 @@ const SnapshotObjectId HeapObjectsMap::kGcRootsFirstSubrootId =
HeapObjectsMap::kGcRootsObjectId + HeapObjectsMap::kObjectIdStep;
const SnapshotObjectId HeapObjectsMap::kFirstAvailableObjectId =
HeapObjectsMap::kGcRootsFirstSubrootId +
- VisitorSynchronization::kNumberOfSyncTags * HeapObjectsMap::kObjectIdStep;
+ static_cast<int>(Root::kNumberOfRoots) * HeapObjectsMap::kObjectIdStep;
HeapObjectsMap::HeapObjectsMap(Heap* heap)
: next_id_(kFirstAvailableObjectId), heap_(heap) {
@@ -733,15 +731,15 @@ class SnapshotFiller {
HeapEntry* parent_entry = &snapshot_->entries()[parent];
parent_entry->SetNamedReference(type, reference_name, child_entry);
}
- void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
- int parent,
+ void SetNamedAutoIndexReference(HeapGraphEdge::Type type, int parent,
+ const char* description,
HeapEntry* child_entry) {
HeapEntry* parent_entry = &snapshot_->entries()[parent];
int index = parent_entry->children_count() + 1;
- parent_entry->SetNamedReference(
- type,
- names_->GetName(index),
- child_entry);
+ const char* name = description
+ ? names_->GetFormatted("%d / %s", index, description)
+ : names_->GetName(index);
+ parent_entry->SetNamedReference(type, name, child_entry);
}
private:
@@ -857,6 +855,8 @@ bool V8HeapExplorer::ExtractReferencesPass1(int entry, HeapObject* obj) {
ExtractCodeReferences(entry, Code::cast(obj));
} else if (obj->IsCell()) {
ExtractCellReferences(entry, Cell::cast(obj));
+ } else if (obj->IsFeedbackCell()) {
+ ExtractFeedbackCellReferences(entry, FeedbackCell::cast(obj));
} else if (obj->IsWeakCell()) {
ExtractWeakCellReferences(entry, WeakCell::cast(obj));
} else if (obj->IsPropertyCell()) {
@@ -929,11 +929,10 @@ void V8HeapExplorer::ExtractJSObjectReferences(
}
}
SharedFunctionInfo* shared_info = js_fun->shared();
- TagObject(js_fun->feedback_vector_cell(),
- "(function feedback vector cell)");
- SetInternalReference(js_fun, entry, "feedback_vector_cell",
- js_fun->feedback_vector_cell(),
- JSFunction::kFeedbackVectorOffset);
+ TagObject(js_fun->feedback_cell(), "(function feedback cell)");
+ SetInternalReference(js_fun, entry, "feedback_cell",
+ js_fun->feedback_cell(),
+ JSFunction::kFeedbackCellOffset);
TagObject(shared_info, "(shared function info)");
SetInternalReference(js_fun, entry,
"shared", shared_info,
@@ -1148,9 +1147,6 @@ void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
"scope_info", shared->scope_info(),
SharedFunctionInfo::kScopeInfoOffset);
SetInternalReference(obj, entry,
- "instance_class_name", shared->instance_class_name(),
- SharedFunctionInfo::kInstanceClassNameOffset);
- SetInternalReference(obj, entry,
"script", shared->script(),
SharedFunctionInfo::kScriptOffset);
const char* construct_stub_name = name ?
@@ -1234,9 +1230,6 @@ void V8HeapExplorer::ExtractCodeReferences(int entry, Code* code) {
SetInternalReference(code, entry,
"relocation_info", code->relocation_info(),
Code::kRelocationInfoOffset);
- SetInternalReference(code, entry,
- "handler_table", code->handler_table(),
- Code::kHandlerTableOffset);
TagObject(code->deoptimization_data(), "(code deopt data)");
SetInternalReference(code, entry,
"deoptimization_data", code->deoptimization_data(),
@@ -1251,6 +1244,13 @@ void V8HeapExplorer::ExtractCellReferences(int entry, Cell* cell) {
SetInternalReference(cell, entry, "value", cell->value(), Cell::kValueOffset);
}
+void V8HeapExplorer::ExtractFeedbackCellReferences(
+ int entry, FeedbackCell* feedback_cell) {
+ TagObject(feedback_cell, "(feedback cell)");
+ SetInternalReference(feedback_cell, entry, "value", feedback_cell->value(),
+ FeedbackCell::kValueOffset);
+}
+
void V8HeapExplorer::ExtractWeakCellReferences(int entry, WeakCell* weak_cell) {
TagObject(weak_cell, "(weak cell)");
SetWeakReference(weak_cell, entry, "value", weak_cell->value(),
@@ -1312,23 +1312,9 @@ void V8HeapExplorer::ExtractJSArrayBufferReferences(
}
void V8HeapExplorer::ExtractJSPromiseReferences(int entry, JSPromise* promise) {
- SetInternalReference(promise, entry, "result", promise->result(),
- JSPromise::kResultOffset);
- SetInternalReference(promise, entry, "deferred_promise",
- promise->deferred_promise(),
- JSPromise::kDeferredPromiseOffset);
- SetInternalReference(promise, entry, "deferred_on_resolve",
- promise->deferred_on_resolve(),
- JSPromise::kDeferredOnResolveOffset);
- SetInternalReference(promise, entry, "deferred_on_reject",
- promise->deferred_on_reject(),
- JSPromise::kDeferredOnRejectOffset);
- SetInternalReference(promise, entry, "fulfill_reactions",
- promise->fulfill_reactions(),
- JSPromise::kFulfillReactionsOffset);
- SetInternalReference(promise, entry, "reject_reactions",
- promise->reject_reactions(),
- JSPromise::kRejectReactionsOffset);
+ SetInternalReference(promise, entry, "reactions_or_result",
+ promise->reactions_or_result(),
+ JSPromise::kReactionsOrResultOffset);
}
void V8HeapExplorer::ExtractFixedArrayReferences(int entry, FixedArray* array) {
@@ -1347,12 +1333,20 @@ void V8HeapExplorer::ExtractFixedArrayReferences(int entry, FixedArray* array) {
int key_index =
ObjectHashTable::EntryToIndex(i) + ObjectHashTable::kEntryKeyIndex;
int value_index = ObjectHashTable::EntryToValueIndex(i);
- SetWeakReference(table, entry, key_index, table->get(key_index),
+ Object* key = table->get(key_index);
+ Object* value = table->get(value_index);
+ SetWeakReference(table, entry, key_index, key,
table->OffsetOfElementAt(key_index));
- SetInternalReference(table, entry, value_index, table->get(value_index),
+ SetInternalReference(table, entry, value_index, value,
table->OffsetOfElementAt(value_index));
- // TODO(alph): Add a strong link (shortcut?) from key to value per
- // WeakMap the key was added to. See crbug.com/778739
+ HeapEntry* key_entry = GetEntry(key);
+ int key_entry_index = key_entry->index();
+ HeapEntry* value_entry = GetEntry(value);
+ if (key_entry && value_entry) {
+ filler_->SetNamedAutoIndexReference(HeapGraphEdge::kInternal,
+ key_entry_index, "WeakMap",
+ value_entry);
+ }
}
break;
}
@@ -1498,73 +1492,30 @@ HeapEntry* V8HeapExplorer::GetEntry(Object* obj) {
}
class RootsReferencesExtractor : public RootVisitor {
- private:
- struct IndexTag {
- IndexTag(size_t index, VisitorSynchronization::SyncTag tag)
- : index(index), tag(tag) {}
- size_t index;
- VisitorSynchronization::SyncTag tag;
- };
-
public:
- explicit RootsReferencesExtractor(Heap* heap)
- : collecting_all_references_(false),
- previous_reference_count_(0),
- heap_(heap) {
- }
+ explicit RootsReferencesExtractor(V8HeapExplorer* explorer)
+ : explorer_(explorer), visiting_weak_roots_(false) {}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
- if (collecting_all_references_) {
- for (Object** p = start; p < end; p++) all_references_.push_back(*p);
- } else {
- for (Object** p = start; p < end; p++) strong_references_.push_back(*p);
- }
- }
+ void SetVisitingWeakRoots() { visiting_weak_roots_ = true; }
- void SetCollectingAllReferences() { collecting_all_references_ = true; }
-
- void FillReferences(V8HeapExplorer* explorer) {
- DCHECK_LE(strong_references_.size(), all_references_.size());
- Builtins* builtins = heap_->isolate()->builtins();
- USE(builtins);
- size_t strong_index = 0, all_index = 0, tags_index = 0;
- int builtin_index = 0;
- while (all_index < all_references_.size()) {
- bool is_strong =
- strong_index < strong_references_.size() &&
- strong_references_[strong_index] == all_references_[all_index];
- explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
- !is_strong,
- all_references_[all_index]);
- if (reference_tags_[tags_index].tag ==
- VisitorSynchronization::kBuiltins) {
- DCHECK(all_references_[all_index]->IsCode());
- explorer->TagBuiltinCodeObject(
- Code::cast(all_references_[all_index]),
- builtins->name(builtin_index++));
- }
- ++all_index;
- if (is_strong) ++strong_index;
- if (reference_tags_[tags_index].index == all_index) ++tags_index;
+ void VisitRootPointer(Root root, const char* description,
+ Object** object) override {
+ if (root == Root::kBuiltins) {
+ explorer_->TagBuiltinCodeObject(Code::cast(*object), description);
}
- CHECK_EQ(strong_index, strong_references_.size());
+ explorer_->SetGcSubrootReference(root, description, visiting_weak_roots_,
+ *object);
}
- void Synchronize(VisitorSynchronization::SyncTag tag) override {
- if (collecting_all_references_ &&
- previous_reference_count_ != all_references_.size()) {
- previous_reference_count_ = all_references_.size();
- reference_tags_.emplace_back(previous_reference_count_, tag);
- }
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
+ for (Object** p = start; p < end; p++)
+ VisitRootPointer(root, description, p);
}
private:
- bool collecting_all_references_;
- std::vector<Object*> strong_references_;
- std::vector<Object*> all_references_;
- size_t previous_reference_count_;
- std::vector<IndexTag> reference_tags_;
- Heap* heap_;
+ V8HeapExplorer* explorer_;
+ bool visiting_weak_roots_;
};
@@ -1574,18 +1525,17 @@ bool V8HeapExplorer::IterateAndExtractReferences(
// Create references to the synthetic roots.
SetRootGcRootsReference();
- for (int tag = 0; tag < VisitorSynchronization::kNumberOfSyncTags; tag++) {
- SetGcRootsReference(static_cast<VisitorSynchronization::SyncTag>(tag));
+ for (int root = 0; root < static_cast<int>(Root::kNumberOfRoots); root++) {
+ SetGcRootsReference(static_cast<Root>(root));
}
// Make sure builtin code objects get their builtin tags
// first. Otherwise a particular JSFunction object could set
// its custom name to a generic builtin.
- RootsReferencesExtractor extractor(heap_);
+ RootsReferencesExtractor extractor(this);
heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG);
- extractor.SetCollectingAllReferences();
- heap_->IterateRoots(&extractor, VISIT_ALL);
- extractor.FillReferences(this);
+ extractor.SetVisitingWeakRoots();
+ heap_->IterateWeakGlobalHandles(&extractor);
// We have to do two passes as sometimes FixedArrays are used
// to weakly hold their items, and it's impossible to distinguish
@@ -1846,39 +1796,31 @@ void V8HeapExplorer::SetRootGcRootsReference() {
void V8HeapExplorer::SetUserGlobalReference(Object* child_obj) {
HeapEntry* child_entry = GetEntry(child_obj);
DCHECK_NOT_NULL(child_entry);
- filler_->SetNamedAutoIndexReference(
- HeapGraphEdge::kShortcut,
- snapshot_->root()->index(),
- child_entry);
+ filler_->SetNamedAutoIndexReference(HeapGraphEdge::kShortcut,
+ snapshot_->root()->index(), nullptr,
+ child_entry);
}
-void V8HeapExplorer::SetGcRootsReference(VisitorSynchronization::SyncTag tag) {
- filler_->SetIndexedAutoIndexReference(
- HeapGraphEdge::kElement,
- snapshot_->gc_roots()->index(),
- snapshot_->gc_subroot(tag));
+void V8HeapExplorer::SetGcRootsReference(Root root) {
+ filler_->SetIndexedAutoIndexReference(HeapGraphEdge::kElement,
+ snapshot_->gc_roots()->index(),
+ snapshot_->gc_subroot(root));
}
-void V8HeapExplorer::SetGcSubrootReference(
- VisitorSynchronization::SyncTag tag, bool is_weak, Object* child_obj) {
+void V8HeapExplorer::SetGcSubrootReference(Root root, const char* description,
+ bool is_weak, Object* child_obj) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry == nullptr) return;
const char* name = GetStrongGcSubrootName(child_obj);
+ HeapGraphEdge::Type edge_type =
+ is_weak ? HeapGraphEdge::kWeak : HeapGraphEdge::kInternal;
if (name != nullptr) {
- DCHECK(!is_weak);
- filler_->SetNamedReference(HeapGraphEdge::kInternal,
- snapshot_->gc_subroot(tag)->index(), name,
- child_entry);
+ filler_->SetNamedReference(edge_type, snapshot_->gc_subroot(root)->index(),
+ name, child_entry);
} else {
- if (is_weak) {
- filler_->SetNamedAutoIndexReference(HeapGraphEdge::kWeak,
- snapshot_->gc_subroot(tag)->index(),
- child_entry);
- } else {
- filler_->SetIndexedAutoIndexReference(HeapGraphEdge::kElement,
- snapshot_->gc_subroot(tag)->index(),
- child_entry);
- }
+ filler_->SetNamedAutoIndexReference(edge_type,
+ snapshot_->gc_subroot(root)->index(),
+ description, child_entry);
}
// Add a shortcut to JS global object reference at snapshot root.
@@ -1945,7 +1887,8 @@ void V8HeapExplorer::TagFixedArraySubType(const FixedArray* array,
class GlobalObjectsEnumerator : public RootVisitor {
public:
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
for (Object** p = start; p < end; p++) {
if (!(*p)->IsNativeContext()) continue;
JSObject* proxy = Context::cast(*p)->global_proxy();
@@ -1983,6 +1926,57 @@ void V8HeapExplorer::TagGlobalObjects() {
}
}
+class EmbedderGraphImpl : public EmbedderGraph {
+ public:
+ struct Edge {
+ Node* from;
+ Node* to;
+ };
+
+ class V8NodeImpl : public Node {
+ public:
+ explicit V8NodeImpl(Object* object) : object_(object) {}
+ Object* GetObject() { return object_; }
+
+ // Node overrides.
+ bool IsEmbedderNode() override { return false; }
+ const char* Name() override {
+ // The name should be retrieved via GetObject().
+ UNREACHABLE();
+ return "";
+ }
+ size_t SizeInBytes() override {
+ // The size should be retrieved via GetObject().
+ UNREACHABLE();
+ return 0;
+ }
+
+ private:
+ Object* object_;
+ };
+
+ Node* V8Node(const v8::Local<v8::Value>& value) final {
+ Handle<Object> object = v8::Utils::OpenHandle(*value);
+ DCHECK(!object.is_null());
+ return AddNode(std::unique_ptr<Node>(new V8NodeImpl(*object)));
+ }
+
+ Node* AddNode(std::unique_ptr<Node> node) final {
+ Node* result = node.get();
+ nodes_.push_back(std::move(node));
+ return result;
+ }
+
+ void AddEdge(Node* from, Node* to) final { edges_.push_back({from, to}); }
+
+ const std::vector<std::unique_ptr<Node>>& nodes() { return nodes_; }
+ const std::vector<Edge>& edges() { return edges_; }
+
+ private:
+ std::vector<std::unique_ptr<Node>> nodes_;
+ std::vector<Edge> edges_;
+};
+
class GlobalHandlesExtractor : public PersistentHandleVisitor {
public:
explicit GlobalHandlesExtractor(NativeObjectsExplorer* explorer)
@@ -2034,6 +2028,60 @@ HeapEntry* BasicHeapEntriesAllocator::AllocateEntry(HeapThing ptr) {
0);
}
+class EmbedderGraphEntriesAllocator : public HeapEntriesAllocator {
+ public:
+ explicit EmbedderGraphEntriesAllocator(HeapSnapshot* snapshot)
+ : snapshot_(snapshot),
+ names_(snapshot_->profiler()->names()),
+ heap_object_map_(snapshot_->profiler()->heap_object_map()) {}
+ virtual HeapEntry* AllocateEntry(HeapThing ptr);
+
+ private:
+ HeapSnapshot* snapshot_;
+ StringsStorage* names_;
+ HeapObjectsMap* heap_object_map_;
+};
+
+namespace {
+
+const char* EmbedderGraphNodeName(StringsStorage* names,
+ EmbedderGraphImpl::Node* node) {
+ const char* prefix = node->NamePrefix();
+ return prefix ? names->GetFormatted("%s %s", prefix, node->Name())
+ : names->GetCopy(node->Name());
+}
+
+HeapEntry::Type EmbedderGraphNodeType(EmbedderGraphImpl::Node* node) {
+ return HeapEntry::kNative;
+}
+
+// Merges the names of an embedder node and its wrapper node.
+// If the wrapper node name contains a tag suffix (part after '/') then the
+// result is the embedder node name concatenated with the tag suffix.
+// Otherwise, the result is the embedder node name.
+const char* MergeNames(StringsStorage* names, const char* embedder_name,
+ const char* wrapper_name) {
+ for (const char* suffix = wrapper_name; *suffix; suffix++) {
+ if (*suffix == '/') {
+ return names->GetFormatted("%s %s", embedder_name, suffix);
+ }
+ }
+ return embedder_name;
+}
+
+} // anonymous namespace
+
+HeapEntry* EmbedderGraphEntriesAllocator::AllocateEntry(HeapThing ptr) {
+ EmbedderGraphImpl::Node* node =
+ reinterpret_cast<EmbedderGraphImpl::Node*>(ptr);
+ DCHECK(node->IsEmbedderNode());
+ size_t size = node->SizeInBytes();
+ return snapshot_->AddEntry(
+ EmbedderGraphNodeType(node), EmbedderGraphNodeName(names_, node),
+ static_cast<SnapshotObjectId>(reinterpret_cast<uintptr_t>(node) << 1),
+ static_cast<int>(size), 0);
+}
+
NativeObjectsExplorer::NativeObjectsExplorer(
HeapSnapshot* snapshot, SnapshottingProgressReportingInterface* progress)
: isolate_(snapshot->profiler()->heap_object_map()->heap()->isolate()),
@@ -2042,13 +2090,13 @@ NativeObjectsExplorer::NativeObjectsExplorer(
embedder_queried_(false),
objects_by_info_(RetainedInfosMatch),
native_groups_(StringsMatch),
- filler_(nullptr) {
- synthetic_entries_allocator_ =
- new BasicHeapEntriesAllocator(snapshot, HeapEntry::kSynthetic);
- native_entries_allocator_ =
- new BasicHeapEntriesAllocator(snapshot, HeapEntry::kNative);
-}
-
+ synthetic_entries_allocator_(
+ new BasicHeapEntriesAllocator(snapshot, HeapEntry::kSynthetic)),
+ native_entries_allocator_(
+ new BasicHeapEntriesAllocator(snapshot, HeapEntry::kNative)),
+ embedder_graph_entries_allocator_(
+ new EmbedderGraphEntriesAllocator(snapshot)),
+ filler_(nullptr) {}
NativeObjectsExplorer::~NativeObjectsExplorer() {
for (base::HashMap::Entry* p = objects_by_info_.Start(); p != nullptr;
@@ -2066,8 +2114,6 @@ NativeObjectsExplorer::~NativeObjectsExplorer() {
reinterpret_cast<v8::RetainedObjectInfo*>(p->value);
info->Dispose();
}
- delete synthetic_entries_allocator_;
- delete native_entries_allocator_;
}
@@ -2114,13 +2160,14 @@ void NativeObjectsExplorer::FillEdges() {
*pair.first->Get(reinterpret_cast<v8::Isolate*>(isolate_)));
HeapObject* parent = HeapObject::cast(*parent_object);
int parent_entry =
- filler_->FindOrAddEntry(parent, native_entries_allocator_)->index();
+ filler_->FindOrAddEntry(parent, native_entries_allocator_.get())
+ ->index();
DCHECK_NE(parent_entry, HeapEntry::kNoEntry);
Handle<Object> child_object = v8::Utils::OpenHandle(
*pair.second->Get(reinterpret_cast<v8::Isolate*>(isolate_)));
HeapObject* child = HeapObject::cast(*child_object);
HeapEntry* child_entry =
- filler_->FindOrAddEntry(child, native_entries_allocator_);
+ filler_->FindOrAddEntry(child, native_entries_allocator_.get());
filler_->SetNamedReference(HeapGraphEdge::kInternal, parent_entry, "native",
child_entry);
}
@@ -2139,25 +2186,83 @@ std::vector<HeapObject*>* NativeObjectsExplorer::GetVectorMaybeDisposeInfo(
return reinterpret_cast<std::vector<HeapObject*>*>(entry->value);
}
+HeapEntry* NativeObjectsExplorer::EntryForEmbedderGraphNode(
+ EmbedderGraphImpl::Node* node) {
+ EmbedderGraphImpl::Node* wrapper = node->WrapperNode();
+ if (wrapper) {
+ node = wrapper;
+ }
+ if (node->IsEmbedderNode()) {
+ return filler_->FindOrAddEntry(node,
+ embedder_graph_entries_allocator_.get());
+ } else {
+ EmbedderGraphImpl::V8NodeImpl* v8_node =
+ static_cast<EmbedderGraphImpl::V8NodeImpl*>(node);
+ Object* object = v8_node->GetObject();
+ if (object->IsSmi()) return nullptr;
+ HeapEntry* entry = filler_->FindEntry(HeapObject::cast(object));
+ return entry;
+ }
+}
bool NativeObjectsExplorer::IterateAndExtractReferences(
SnapshotFiller* filler) {
filler_ = filler;
- FillRetainedObjects();
- FillEdges();
- if (EstimateObjectsCount() > 0) {
- for (base::HashMap::Entry* p = objects_by_info_.Start(); p != nullptr;
- p = objects_by_info_.Next(p)) {
- v8::RetainedObjectInfo* info =
- reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
- SetNativeRootReference(info);
- std::vector<HeapObject*>* objects =
- reinterpret_cast<std::vector<HeapObject*>*>(p->value);
- for (HeapObject* object : *objects) {
- SetWrapperNativeReferences(object, info);
+
+ if (FLAG_heap_profiler_use_embedder_graph &&
+ snapshot_->profiler()->HasBuildEmbedderGraphCallback()) {
+ v8::HandleScope scope(reinterpret_cast<v8::Isolate*>(isolate_));
+ DisallowHeapAllocation no_allocation;
+ EmbedderGraphImpl graph;
+ snapshot_->profiler()->BuildEmbedderGraph(isolate_, &graph);
+ for (const auto& node : graph.nodes()) {
+ if (node->IsRootNode()) {
+ filler_->SetIndexedAutoIndexReference(
+ HeapGraphEdge::kElement, snapshot_->root()->index(),
+ EntryForEmbedderGraphNode(node.get()));
+ }
+ // Adjust the name and the type of the V8 wrapper node.
+ auto wrapper = node->WrapperNode();
+ if (wrapper) {
+ HeapEntry* wrapper_entry = EntryForEmbedderGraphNode(wrapper);
+ wrapper_entry->set_name(
+ MergeNames(names_, EmbedderGraphNodeName(names_, node.get()),
+ wrapper_entry->name()));
+ wrapper_entry->set_type(EmbedderGraphNodeType(node.get()));
+ }
+ }
+ // Fill edges of the graph.
+ for (const auto& edge : graph.edges()) {
+ HeapEntry* from = EntryForEmbedderGraphNode(edge.from);
+      // The |from| and |to| can be nullptr if the corresponding node is a V8 node
+ // pointing to a Smi.
+ if (!from) continue;
+ // Adding an entry for |edge.to| can invalidate the |from| entry because
+ // it is an address in std::vector. Use index instead of pointer.
+ int from_index = from->index();
+ HeapEntry* to = EntryForEmbedderGraphNode(edge.to);
+ if (to) {
+ filler_->SetIndexedAutoIndexReference(HeapGraphEdge::kElement,
+ from_index, to);
+ }
+ }
+ } else {
+ FillRetainedObjects();
+ FillEdges();
+ if (EstimateObjectsCount() > 0) {
+ for (base::HashMap::Entry* p = objects_by_info_.Start(); p != nullptr;
+ p = objects_by_info_.Next(p)) {
+ v8::RetainedObjectInfo* info =
+ reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
+ SetNativeRootReference(info);
+ std::vector<HeapObject*>* objects =
+ reinterpret_cast<std::vector<HeapObject*>*>(p->value);
+ for (HeapObject* object : *objects) {
+ SetWrapperNativeReferences(object, info);
+ }
}
+ SetRootNativeRootsReference();
}
- SetRootNativeRootsReference();
}
filler_ = nullptr;
return true;
@@ -2210,19 +2315,17 @@ NativeGroupRetainedObjectInfo* NativeObjectsExplorer::FindOrAddGroupInfo(
void NativeObjectsExplorer::SetNativeRootReference(
v8::RetainedObjectInfo* info) {
HeapEntry* child_entry =
- filler_->FindOrAddEntry(info, native_entries_allocator_);
+ filler_->FindOrAddEntry(info, native_entries_allocator_.get());
DCHECK_NOT_NULL(child_entry);
NativeGroupRetainedObjectInfo* group_info =
FindOrAddGroupInfo(info->GetGroupLabel());
HeapEntry* group_entry =
- filler_->FindOrAddEntry(group_info, synthetic_entries_allocator_);
+ filler_->FindOrAddEntry(group_info, synthetic_entries_allocator_.get());
// |FindOrAddEntry| can move and resize the entries backing store. Reload
// potentially-stale pointer.
child_entry = filler_->FindEntry(info);
filler_->SetNamedAutoIndexReference(
- HeapGraphEdge::kInternal,
- group_entry->index(),
- child_entry);
+ HeapGraphEdge::kInternal, group_entry->index(), nullptr, child_entry);
}
@@ -2231,7 +2334,7 @@ void NativeObjectsExplorer::SetWrapperNativeReferences(
HeapEntry* wrapper_entry = filler_->FindEntry(wrapper);
DCHECK_NOT_NULL(wrapper_entry);
HeapEntry* info_entry =
- filler_->FindOrAddEntry(info, native_entries_allocator_);
+ filler_->FindOrAddEntry(info, native_entries_allocator_.get());
DCHECK_NOT_NULL(info_entry);
filler_->SetNamedReference(HeapGraphEdge::kInternal,
wrapper_entry->index(),
@@ -2249,7 +2352,7 @@ void NativeObjectsExplorer::SetRootNativeRootsReference() {
NativeGroupRetainedObjectInfo* group_info =
static_cast<NativeGroupRetainedObjectInfo*>(entry->value);
HeapEntry* group_entry =
- filler_->FindOrAddEntry(group_info, native_entries_allocator_);
+ filler_->FindOrAddEntry(group_info, native_entries_allocator_.get());
DCHECK_NOT_NULL(group_entry);
filler_->SetIndexedAutoIndexReference(
HeapGraphEdge::kElement,
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index 2dacd5a9fe..5c7d88e0ca 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -113,6 +113,7 @@ class HeapEntry BASE_EMBEDDED {
HeapSnapshot* snapshot() { return snapshot_; }
Type type() const { return static_cast<Type>(type_); }
+ void set_type(Type type) { type_ = type; }
const char* name() const { return name_; }
void set_name(const char* name) { name_ = name; }
SnapshotObjectId id() const { return id_; }
@@ -165,8 +166,8 @@ class HeapSnapshot {
HeapProfiler* profiler() { return profiler_; }
HeapEntry* root() { return &entries_[root_index_]; }
HeapEntry* gc_roots() { return &entries_[gc_roots_index_]; }
- HeapEntry* gc_subroot(int index) {
- return &entries_[gc_subroot_indexes_[index]];
+ HeapEntry* gc_subroot(Root root) {
+ return &entries_[gc_subroot_indexes_[static_cast<int>(root)]];
}
std::vector<HeapEntry>& entries() { return entries_; }
std::deque<HeapGraphEdge>& edges() { return edges_; }
@@ -191,12 +192,12 @@ class HeapSnapshot {
private:
HeapEntry* AddRootEntry();
HeapEntry* AddGcRootsEntry();
- HeapEntry* AddGcSubrootEntry(int tag, SnapshotObjectId id);
+ HeapEntry* AddGcSubrootEntry(Root root, SnapshotObjectId id);
HeapProfiler* profiler_;
int root_index_;
int gc_roots_index_;
- int gc_subroot_indexes_[VisitorSynchronization::kNumberOfSyncTags];
+ int gc_subroot_indexes_[static_cast<int>(Root::kNumberOfRoots)];
std::vector<HeapEntry> entries_;
std::deque<HeapGraphEdge> edges_;
std::deque<HeapGraphEdge*> children_;
@@ -384,6 +385,7 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void ExtractAccessorPairReferences(int entry, AccessorPair* accessors);
void ExtractCodeReferences(int entry, Code* code);
void ExtractCellReferences(int entry, Cell* cell);
+ void ExtractFeedbackCellReferences(int entry, FeedbackCell* feedback_cell);
void ExtractWeakCellReferences(int entry, WeakCell* weak_cell);
void ExtractPropertyCellReferences(int entry, PropertyCell* cell);
void ExtractAllocationSiteReferences(int entry, AllocationSite* site);
@@ -445,9 +447,9 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void SetUserGlobalReference(Object* user_global);
void SetRootGcRootsReference();
- void SetGcRootsReference(VisitorSynchronization::SyncTag tag);
- void SetGcSubrootReference(
- VisitorSynchronization::SyncTag tag, bool is_weak, Object* child);
+ void SetGcRootsReference(Root root);
+ void SetGcSubrootReference(Root root, const char* description, bool is_weak,
+ Object* child);
const char* GetStrongGcSubrootName(Object* object);
void TagObject(Object* obj, const char* tag);
void TagFixedArraySubType(const FixedArray* array,
@@ -514,6 +516,8 @@ class NativeObjectsExplorer {
NativeGroupRetainedObjectInfo* FindOrAddGroupInfo(const char* label);
+ HeapEntry* EntryForEmbedderGraphNode(EmbedderGraph::Node* node);
+
Isolate* isolate_;
HeapSnapshot* snapshot_;
StringsStorage* names_;
@@ -522,8 +526,9 @@ class NativeObjectsExplorer {
// RetainedObjectInfo* -> std::vector<HeapObject*>*
base::CustomMatcherHashMap objects_by_info_;
base::CustomMatcherHashMap native_groups_;
- HeapEntriesAllocator* synthetic_entries_allocator_;
- HeapEntriesAllocator* native_entries_allocator_;
+ std::unique_ptr<HeapEntriesAllocator> synthetic_entries_allocator_;
+ std::unique_ptr<HeapEntriesAllocator> native_entries_allocator_;
+ std::unique_ptr<HeapEntriesAllocator> embedder_graph_entries_allocator_;
// Used during references extraction.
SnapshotFiller* filler_;
v8::HeapProfiler::RetainerEdges edges_;
diff --git a/deps/v8/src/profiler/profile-generator-inl.h b/deps/v8/src/profiler/profile-generator-inl.h
index 5a7017ad49..970d462937 100644
--- a/deps/v8/src/profiler/profile-generator-inl.h
+++ b/deps/v8/src/profiler/profile-generator-inl.h
@@ -13,7 +13,8 @@ namespace internal {
CodeEntry::CodeEntry(CodeEventListener::LogEventsAndTags tag, const char* name,
const char* name_prefix, const char* resource_name,
int line_number, int column_number,
- JITLineInfoTable* line_info, Address instruction_start)
+ std::unique_ptr<JITLineInfoTable> line_info,
+ Address instruction_start)
: bit_field_(TagField::encode(tag) |
BuiltinIdField::encode(Builtins::builtin_count)),
name_prefix_(name_prefix),
@@ -26,7 +27,7 @@ CodeEntry::CodeEntry(CodeEventListener::LogEventsAndTags tag, const char* name,
bailout_reason_(kEmptyBailoutReason),
deopt_reason_(kNoDeoptReason),
deopt_id_(kNoDeoptimizationId),
- line_info_(line_info),
+ line_info_(std::move(line_info)),
instruction_start_(instruction_start) {}
ProfileNode::ProfileNode(ProfileTree* tree, CodeEntry* entry,
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index bb6ede6d95..9786741b94 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -85,16 +85,6 @@ CodeEntry* CodeEntry::UnresolvedEntryCreateTrait::Create() {
CodeEntry::kUnresolvedFunctionName);
}
-CodeEntry::~CodeEntry() {
- delete line_info_;
- for (auto location : inline_locations_) {
- for (auto entry : location.second) {
- delete entry;
- }
- }
-}
-
-
uint32_t CodeEntry::GetHash() const {
uint32_t hash = ComputeIntegerHash(tag());
if (script_id_ != v8::UnboundScript::kNoScriptId) {
@@ -137,12 +127,13 @@ int CodeEntry::GetSourceLine(int pc_offset) const {
return v8::CpuProfileNode::kNoLineNumberInfo;
}
-void CodeEntry::AddInlineStack(int pc_offset,
- std::vector<CodeEntry*> inline_stack) {
+void CodeEntry::AddInlineStack(
+ int pc_offset, std::vector<std::unique_ptr<CodeEntry>> inline_stack) {
inline_locations_.insert(std::make_pair(pc_offset, std::move(inline_stack)));
}
-const std::vector<CodeEntry*>* CodeEntry::GetInlineStack(int pc_offset) const {
+const std::vector<std::unique_ptr<CodeEntry>>* CodeEntry::GetInlineStack(
+ int pc_offset) const {
auto it = inline_locations_.find(pc_offset);
return it != inline_locations_.end() ? &it->second : nullptr;
}
@@ -528,9 +519,9 @@ void CodeMap::MoveCode(Address from, Address to) {
}
void CodeMap::Print() {
- for (auto it = code_map_.begin(); it != code_map_.end(); ++it) {
- base::OS::Print("%p %5d %s\n", static_cast<void*>(it->first),
- it->second.size, it->second.entry->name());
+ for (const auto& pair : code_map_) {
+ base::OS::Print("%p %5d %s\n", static_cast<void*>(pair.first),
+ pair.second.size, pair.second.entry->name());
}
}
@@ -539,12 +530,6 @@ CpuProfilesCollection::CpuProfilesCollection(Isolate* isolate)
profiler_(nullptr),
current_profiles_semaphore_(1) {}
-CpuProfilesCollection::~CpuProfilesCollection() {
- for (CpuProfile* profile : finished_profiles_) delete profile;
- for (CpuProfile* profile : current_profiles_) delete profile;
-}
-
-
bool CpuProfilesCollection::StartProfiling(const char* title,
bool record_samples) {
current_profiles_semaphore_.Wait();
@@ -552,7 +537,7 @@ bool CpuProfilesCollection::StartProfiling(const char* title,
current_profiles_semaphore_.Signal();
return false;
}
- for (CpuProfile* profile : current_profiles_) {
+ for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
if (strcmp(profile->title(), title) == 0) {
// Ignore attempts to start profile with the same title...
current_profiles_semaphore_.Signal();
@@ -560,7 +545,8 @@ bool CpuProfilesCollection::StartProfiling(const char* title,
return true;
}
}
- current_profiles_.push_back(new CpuProfile(profiler_, title, record_samples));
+ current_profiles_.emplace_back(
+ new CpuProfile(profiler_, title, record_samples));
current_profiles_semaphore_.Signal();
return true;
}
@@ -570,19 +556,22 @@ CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
const int title_len = StrLength(title);
CpuProfile* profile = nullptr;
current_profiles_semaphore_.Wait();
- for (size_t i = current_profiles_.size(); i != 0; --i) {
- CpuProfile* current_profile = current_profiles_[i - 1];
- if (title_len == 0 || strcmp(current_profile->title(), title) == 0) {
- profile = current_profile;
- current_profiles_.erase(current_profiles_.begin() + i - 1);
- break;
- }
+
+ auto it =
+ std::find_if(current_profiles_.rbegin(), current_profiles_.rend(),
+ [&](const std::unique_ptr<CpuProfile>& p) {
+ return title_len == 0 || strcmp(p->title(), title) == 0;
+ });
+
+ if (it != current_profiles_.rend()) {
+ (*it)->FinishProfile();
+ profile = it->get();
+ finished_profiles_.push_back(std::move(*it));
+ // Convert reverse iterator to matching forward iterator.
+ current_profiles_.erase(--(it.base()));
}
- current_profiles_semaphore_.Signal();
- if (!profile) return nullptr;
- profile->FinishProfile();
- finished_profiles_.push_back(profile);
+ current_profiles_semaphore_.Signal();
return profile;
}
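The --(it.base()) conversion in the hunk above relies on the standard mapping between a reverse iterator and its base(): base() refers to the element one past the one the reverse iterator denotes, so stepping it back once yields the forward iterator that erase() expects. A small standalone sketch of the same find-from-the-back-and-erase pattern (illustrative data, not the profiler types):

    #include <algorithm>
    #include <cassert>
    #include <iterator>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> titles = {"a", "b", "a"};
      // Search from the back for the most recently added "a".
      auto rit = std::find_if(titles.rbegin(), titles.rend(),
                              [](const std::string& t) { return t == "a"; });
      assert(rit != titles.rend());
      // rit.base() points one past the found element; step back to erase it.
      titles.erase(std::prev(rit.base()));
      assert((titles == std::vector<std::string>{"a", "b"}));
      return 0;
    }

std::prev(rit.base()) names the same element as --(it.base()) in the patch; using std::prev simply avoids decrementing a temporary iterator.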
@@ -599,7 +588,10 @@ bool CpuProfilesCollection::IsLastProfile(const char* title) {
void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
// Called from VM thread for a completed profile.
auto pos =
- std::find(finished_profiles_.begin(), finished_profiles_.end(), profile);
+ std::find_if(finished_profiles_.begin(), finished_profiles_.end(),
+ [&](const std::unique_ptr<CpuProfile>& finished_profile) {
+ return finished_profile.get() == profile;
+ });
DCHECK(pos != finished_profiles_.end());
finished_profiles_.erase(pos);
}
@@ -611,7 +603,7 @@ void CpuProfilesCollection::AddPathToCurrentProfiles(
// method, we don't bother minimizing the duration of lock holding,
// e.g. copying contents of the list to a local vector.
current_profiles_semaphore_.Wait();
- for (CpuProfile* profile : current_profiles_) {
+ for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
profile->AddPath(timestamp, path, src_line, update_stats);
}
current_profiles_semaphore_.Signal();
@@ -684,11 +676,13 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
// Find out if the entry has an inlining stack associated.
int pc_offset =
static_cast<int>(stack_pos - entry->instruction_start());
- const std::vector<CodeEntry*>* inline_stack =
+ const std::vector<std::unique_ptr<CodeEntry>>* inline_stack =
entry->GetInlineStack(pc_offset);
if (inline_stack) {
- entries.insert(entries.end(), inline_stack->rbegin(),
- inline_stack->rend());
+ std::transform(
+ inline_stack->rbegin(), inline_stack->rend(),
+ std::back_inserter(entries),
+ [](const std::unique_ptr<CodeEntry>& ptr) { return ptr.get(); });
}
// Skip unresolved frames (e.g. internal frame) and get source line of
// the first JS caller.
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index 819800ae6b..5abb955a46 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -8,6 +8,7 @@
#include <map>
#include <vector>
+#include "include/v8-profiler.h"
#include "src/allocation.h"
#include "src/base/hashmap.h"
#include "src/log.h"
@@ -47,9 +48,8 @@ class CodeEntry {
const char* resource_name = CodeEntry::kEmptyResourceName,
int line_number = v8::CpuProfileNode::kNoLineNumberInfo,
int column_number = v8::CpuProfileNode::kNoColumnNumberInfo,
- JITLineInfoTable* line_info = nullptr,
+ std::unique_ptr<JITLineInfoTable> line_info = nullptr,
Address instruction_start = nullptr);
- ~CodeEntry();
const char* name_prefix() const { return name_prefix_; }
bool has_name_prefix() const { return name_prefix_[0] != '\0'; }
@@ -57,7 +57,7 @@ class CodeEntry {
const char* resource_name() const { return resource_name_; }
int line_number() const { return line_number_; }
int column_number() const { return column_number_; }
- const JITLineInfoTable* line_info() const { return line_info_; }
+ const JITLineInfoTable* line_info() const { return line_info_.get(); }
int script_id() const { return script_id_; }
void set_script_id(int script_id) { script_id_ = script_id; }
int position() const { return position_; }
@@ -91,8 +91,10 @@ class CodeEntry {
int GetSourceLine(int pc_offset) const;
- void AddInlineStack(int pc_offset, std::vector<CodeEntry*> inline_stack);
- const std::vector<CodeEntry*>* GetInlineStack(int pc_offset) const;
+ void AddInlineStack(int pc_offset,
+ std::vector<std::unique_ptr<CodeEntry>> inline_stack);
+ const std::vector<std::unique_ptr<CodeEntry>>* GetInlineStack(
+ int pc_offset) const;
void AddDeoptInlinedFrames(int deopt_id, std::vector<CpuProfileDeoptFrame>);
bool HasDeoptInlinedFramesFor(int deopt_id) const;
@@ -160,10 +162,10 @@ class CodeEntry {
const char* bailout_reason_;
const char* deopt_reason_;
int deopt_id_;
- JITLineInfoTable* line_info_;
+ std::unique_ptr<JITLineInfoTable> line_info_;
Address instruction_start_;
// Should be an unordered_map, but it doesn't currently work on Win & MacOS.
- std::map<int, std::vector<CodeEntry*>> inline_locations_;
+ std::map<int, std::vector<std::unique_ptr<CodeEntry>>> inline_locations_;
std::map<int, std::vector<CpuProfileDeoptFrame>> deopt_inlined_frames_;
DISALLOW_COPY_AND_ASSIGN(CodeEntry);
@@ -337,12 +339,13 @@ class CodeMap {
class CpuProfilesCollection {
public:
explicit CpuProfilesCollection(Isolate* isolate);
- ~CpuProfilesCollection();
void set_cpu_profiler(CpuProfiler* profiler) { profiler_ = profiler; }
bool StartProfiling(const char* title, bool record_samples);
CpuProfile* StopProfiling(const char* title);
- std::vector<CpuProfile*>* profiles() { return &finished_profiles_; }
+ std::vector<std::unique_ptr<CpuProfile>>* profiles() {
+ return &finished_profiles_;
+ }
const char* GetName(Name* name) { return resource_names_.GetName(name); }
bool IsLastProfile(const char* title);
void RemoveProfile(CpuProfile* profile);
@@ -357,11 +360,11 @@ class CpuProfilesCollection {
private:
StringsStorage resource_names_;
- std::vector<CpuProfile*> finished_profiles_;
+ std::vector<std::unique_ptr<CpuProfile>> finished_profiles_;
CpuProfiler* profiler_;
// Accessed by VM thread and profile generator thread.
- std::vector<CpuProfile*> current_profiles_;
+ std::vector<std::unique_ptr<CpuProfile>> current_profiles_;
base::Semaphore current_profiles_semaphore_;
DISALLOW_COPY_AND_ASSIGN(CpuProfilesCollection);
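The header hunk above shows the ownership pattern this patch applies across the profiler: containers of raw owning pointers plus a hand-written destructor become containers of std::unique_ptr, and the destructor declaration disappears because element destruction is now automatic. A minimal before/after sketch under that assumption, with an illustrative Profile type standing in for CpuProfile:

    #include <memory>
    #include <string>
    #include <vector>

    struct Profile {  // illustrative stand-in for CpuProfile
      explicit Profile(std::string title) : title(std::move(title)) {}
      std::string title;
    };

    // Before: raw owning pointers require a user-written destructor.
    class RawCollection {
     public:
      ~RawCollection() {
        for (Profile* p : profiles_) delete p;
      }
      void Add(const std::string& t) { profiles_.push_back(new Profile(t)); }

     private:
      std::vector<Profile*> profiles_;
    };

    // After: unique_ptr elements are destroyed with the vector, so no
    // user-written destructor is needed and ownership is explicit in the type.
    class OwningCollection {
     public:
      void Add(const std::string& t) {
        profiles_.push_back(std::make_unique<Profile>(t));
      }

     private:
      std::vector<std::unique_ptr<Profile>> profiles_;
    };

    int main() {
      RawCollection raw;
      raw.Add("startup");
      OwningCollection owning;
      owning.Add("startup");
      return 0;
    }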
diff --git a/deps/v8/src/profiler/profiler-listener.cc b/deps/v8/src/profiler/profiler-listener.cc
index bd2f158e60..cec71d70e0 100644
--- a/deps/v8/src/profiler/profiler-listener.cc
+++ b/deps/v8/src/profiler/profiler-listener.cc
@@ -5,10 +5,12 @@
#include "src/profiler/profiler-listener.h"
#include "src/deoptimizer.h"
+#include "src/instruction-stream.h"
#include "src/objects-inl.h"
#include "src/profiler/cpu-profiler.h"
#include "src/profiler/profile-generator-inl.h"
#include "src/source-position-table.h"
+#include "src/wasm/wasm-code-manager.h"
namespace v8 {
namespace internal {
@@ -81,10 +83,10 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->start = abstract_code->address();
- JITLineInfoTable* line_table = nullptr;
+ std::unique_ptr<JITLineInfoTable> line_table;
if (shared->script()->IsScript()) {
Script* script = Script::cast(shared->script());
- line_table = new JITLineInfoTable();
+ line_table.reset(new JITLineInfoTable());
int offset = abstract_code->IsCode() ? Code::kHeaderSize
: BytecodeArray::kHeaderSize;
for (SourcePositionTableIterator it(abstract_code->source_position_table());
@@ -101,8 +103,8 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
}
rec->entry = NewCodeEntry(
tag, GetFunctionName(shared->DebugName()), CodeEntry::kEmptyNamePrefix,
- GetName(InferScriptName(script_name, shared)), line, column, line_table,
- abstract_code->instruction_start());
+ GetName(InferScriptName(script_name, shared)), line, column,
+ std::move(line_table), abstract_code->instruction_start());
RecordInliningInfo(rec->entry, abstract_code);
RecordDeoptInlinedFrames(rec->entry, abstract_code);
rec->entry->FillFunctionInfo(shared);
@@ -110,6 +112,24 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
DispatchCodeEvent(evt_rec);
}
+void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ wasm::WasmCode* code,
+ wasm::WasmName name) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = code->instructions().start();
+ // TODO(herhut): Instead of sanitizing here, make sure all wasm functions
+ // have names.
+ const char* name_ptr =
+ name.start() == nullptr ? "<anonymous>" : GetFunctionName(name.start());
+ rec->entry = NewCodeEntry(
+ tag, name_ptr, CodeEntry::kEmptyNamePrefix, CodeEntry::kEmptyResourceName,
+ CpuProfileNode::kNoLineNumberInfo, CpuProfileNode::kNoColumnNumberInfo,
+ nullptr, code->instructions().start());
+ rec->size = code->instructions().length();
+ DispatchCodeEvent(evt_rec);
+}
+
void ProfilerListener::CodeMoveEvent(AbstractCode* from, Address to) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_MOVE);
CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
@@ -164,6 +184,20 @@ void ProfilerListener::RegExpCodeCreateEvent(AbstractCode* code,
DispatchCodeEvent(evt_rec);
}
+void ProfilerListener::InstructionStreamCreateEvent(
+ CodeEventListener::LogEventsAndTags tag, const InstructionStream* stream,
+ const char* description) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = stream->bytes();
+ rec->entry = NewCodeEntry(
+ tag, description, CodeEntry::kEmptyNamePrefix,
+ CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
+ CpuProfileNode::kNoColumnNumberInfo, nullptr, stream->bytes());
+ rec->size = static_cast<unsigned>(stream->byte_length());
+ DispatchCodeEvent(evt_rec);
+}
+
void ProfilerListener::SetterCallbackEvent(Name* name, Address entry_point) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
@@ -199,7 +233,7 @@ void ProfilerListener::RecordInliningInfo(CodeEntry* entry,
DCHECK_EQ(Translation::BEGIN, opcode);
it.Skip(Translation::NumberOfOperandsFor(opcode));
int depth = 0;
- std::vector<CodeEntry*> inline_stack;
+ std::vector<std::unique_ptr<CodeEntry>> inline_stack;
while (it.HasNext() &&
Translation::BEGIN !=
(opcode = static_cast<Translation::Opcode>(it.Next()))) {
@@ -227,7 +261,7 @@ void ProfilerListener::RecordInliningInfo(CodeEntry* entry,
CpuProfileNode::kNoColumnNumberInfo, nullptr,
code->instruction_start());
inline_entry->FillFunctionInfo(shared_info);
- inline_stack.push_back(inline_entry);
+ inline_stack.emplace_back(inline_entry);
}
if (!inline_stack.empty()) {
entry->AddInlineStack(pc_offset, std::move(inline_stack));
@@ -276,10 +310,11 @@ void ProfilerListener::RecordDeoptInlinedFrames(CodeEntry* entry,
CodeEntry* ProfilerListener::NewCodeEntry(
CodeEventListener::LogEventsAndTags tag, const char* name,
const char* name_prefix, const char* resource_name, int line_number,
- int column_number, JITLineInfoTable* line_info, Address instruction_start) {
+ int column_number, std::unique_ptr<JITLineInfoTable> line_info,
+ Address instruction_start) {
std::unique_ptr<CodeEntry> code_entry = base::make_unique<CodeEntry>(
tag, name, name_prefix, resource_name, line_number, column_number,
- line_info, instruction_start);
+ std::move(line_info), instruction_start);
CodeEntry* raw_code_entry = code_entry.get();
code_entries_.push_back(std::move(code_entry));
return raw_code_entry;
diff --git a/deps/v8/src/profiler/profiler-listener.h b/deps/v8/src/profiler/profiler-listener.h
index c111bf81c4..ca2c213a93 100644
--- a/deps/v8/src/profiler/profiler-listener.h
+++ b/deps/v8/src/profiler/profiler-listener.h
@@ -37,6 +37,9 @@ class ProfilerListener : public CodeEventListener {
void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
AbstractCode* code, SharedFunctionInfo* shared,
Name* script_name, int line, int column) override;
+ void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ wasm::WasmCode* code, wasm::WasmName name) override;
+
void CodeMovingGCEvent() override {}
void CodeMoveEvent(AbstractCode* from, Address to) override;
void CodeDisableOptEvent(AbstractCode* code,
@@ -45,6 +48,9 @@ class ProfilerListener : public CodeEventListener {
int fp_to_sp_delta) override;
void GetterCallbackEvent(Name* name, Address entry_point) override;
void RegExpCodeCreateEvent(AbstractCode* code, String* source) override;
+ void InstructionStreamCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ const InstructionStream* stream,
+ const char* description) override;
void SetterCallbackEvent(Name* name, Address entry_point) override;
void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
@@ -54,7 +60,7 @@ class ProfilerListener : public CodeEventListener {
const char* resource_name = CodeEntry::kEmptyResourceName,
int line_number = v8::CpuProfileNode::kNoLineNumberInfo,
int column_number = v8::CpuProfileNode::kNoColumnNumberInfo,
- JITLineInfoTable* line_info = nullptr,
+ std::unique_ptr<JITLineInfoTable> line_info = nullptr,
Address instruction_start = nullptr);
void AddObserver(CodeEventObserver* observer);
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.cc b/deps/v8/src/profiler/sampling-heap-profiler.cc
index fef21550ec..31c885fef0 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.cc
+++ b/deps/v8/src/profiler/sampling-heap-profiler.cc
@@ -76,11 +76,7 @@ SamplingHeapProfiler::~SamplingHeapProfiler() {
heap_->RemoveAllocationObserversFromAllSpaces(other_spaces_observer_.get(),
new_space_observer_.get());
- for (auto sample : samples_) {
- delete sample;
- }
- std::set<Sample*> empty;
- samples_.swap(empty);
+ samples_.clear();
}
@@ -101,7 +97,7 @@ void SamplingHeapProfiler::SampleObject(Address soon_object, size_t size) {
AllocationNode* node = AddStack();
node->allocations_[size]++;
Sample* sample = new Sample(size, node, loc, this);
- samples_.insert(sample);
+ samples_.emplace(sample);
sample->global.SetWeak(sample, OnWeakCallback, WeakCallbackType::kParameter);
}
@@ -123,8 +119,14 @@ void SamplingHeapProfiler::OnWeakCallback(
node = parent;
}
}
- sample->profiler->samples_.erase(sample);
- delete sample;
+ auto it = std::find_if(sample->profiler->samples_.begin(),
+ sample->profiler->samples_.end(),
+ [&sample](const std::unique_ptr<Sample>& s) {
+ return s.get() == sample;
+ });
+
+ sample->profiler->samples_.erase(it);
+  // |sample| is deleted because its unique_ptr was erased from samples_.
}
SamplingHeapProfiler::AllocationNode*
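In the weak-callback hunk above, samples_ now owns its elements through std::unique_ptr<Sample>, and a std::set cannot be searched with a raw Sample* unless its comparator is transparent, so the code scans with std::find_if and compares s.get(); erasing the matching node also destroys the sample. A standalone sketch of that lookup-and-erase pattern (illustrative Sample struct, not the profiler's):

    #include <algorithm>
    #include <cassert>
    #include <memory>
    #include <set>

    struct Sample { int size; };  // illustrative stand-in

    int main() {
      std::set<std::unique_ptr<Sample>> samples;
      samples.emplace(new Sample{16});
      auto inserted = samples.emplace(new Sample{32});
      Sample* raw = inserted.first->get();  // raw pointer handed out elsewhere
      // Locate the owning unique_ptr by raw-pointer comparison, then erase it.
      auto it = std::find_if(samples.begin(), samples.end(),
                             [raw](const std::unique_ptr<Sample>& s) {
                               return s.get() == raw;
                             });
      assert(it != samples.end());
      samples.erase(it);  // removing the node also deletes the Sample
      assert(samples.size() == 1);
      return 0;
    }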
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.h b/deps/v8/src/profiler/sampling-heap-profiler.h
index 3a347dd54e..46fa405279 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.h
+++ b/deps/v8/src/profiler/sampling-heap-profiler.h
@@ -146,7 +146,7 @@ class SamplingHeapProfiler {
std::unique_ptr<SamplingAllocationObserver> other_spaces_observer_;
StringsStorage* const names_;
AllocationNode profile_root_;
- std::set<Sample*> samples_;
+ std::set<std::unique_ptr<Sample>> samples_;
const int stack_depth_;
const uint64_t rate_;
v8::HeapProfiler::SamplingFlags flags_;
diff --git a/deps/v8/src/profiler/strings-storage.cc b/deps/v8/src/profiler/strings-storage.cc
index 2e8ad779fd..9ea7770b4b 100644
--- a/deps/v8/src/profiler/strings-storage.cc
+++ b/deps/v8/src/profiler/strings-storage.cc
@@ -80,7 +80,7 @@ const char* StringsStorage::GetVFormatted(const char* format, va_list args) {
const char* StringsStorage::GetName(Name* name) {
if (name->IsString()) {
String* str = String::cast(name);
- int length = Min(kMaxNameSize, str->length());
+ int length = Min(FLAG_heap_snapshot_string_limit, str->length());
int actual_length = 0;
std::unique_ptr<char[]> data = str->ToCString(
DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length, &actual_length);
diff --git a/deps/v8/src/profiler/strings-storage.h b/deps/v8/src/profiler/strings-storage.h
index d73a9dd208..834b5a3335 100644
--- a/deps/v8/src/profiler/strings-storage.h
+++ b/deps/v8/src/profiler/strings-storage.h
@@ -31,8 +31,6 @@ class StringsStorage {
const char* GetFunctionName(const char* name);
private:
- static const int kMaxNameSize = 1024;
-
static bool StringsMatch(void* key1, void* key2);
const char* AddOrDisposeString(char* str, int len);
base::CustomMatcherHashMap::Entry* GetEntry(const char* str, int len);
diff --git a/deps/v8/src/profiler/tick-sample.cc b/deps/v8/src/profiler/tick-sample.cc
index 44bf9af3d1..a6b8b26d00 100644
--- a/deps/v8/src/profiler/tick-sample.cc
+++ b/deps/v8/src/profiler/tick-sample.cc
@@ -204,7 +204,12 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
#endif
DCHECK(regs->sp);
- if (regs->pc && IsNoFrameRegion(static_cast<i::Address>(regs->pc))) {
+ // Check whether we interrupted setup/teardown of a stack frame in JS code.
+ // Avoid this check for C++ code, as that would trigger false positives.
+ if (regs->pc &&
+ isolate->heap()->memory_allocator()->code_range()->contains(
+ static_cast<i::Address>(regs->pc)) &&
+ IsNoFrameRegion(static_cast<i::Address>(regs->pc))) {
// The frame is not setup, so it'd be hard to iterate the stack. Bailout.
return false;
}
diff --git a/deps/v8/src/profiler/tracing-cpu-profiler.h b/deps/v8/src/profiler/tracing-cpu-profiler.h
index e654f2be9d..ccd1fa42a2 100644
--- a/deps/v8/src/profiler/tracing-cpu-profiler.h
+++ b/deps/v8/src/profiler/tracing-cpu-profiler.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PROFILER_TRACING_CPU_PROFILER_H
-#define V8_PROFILER_TRACING_CPU_PROFILER_H
+#ifndef V8_PROFILER_TRACING_CPU_PROFILER_H_
+#define V8_PROFILER_TRACING_CPU_PROFILER_H_
#include "include/v8-platform.h"
#include "include/v8-profiler.h"
@@ -43,4 +43,4 @@ class TracingCpuProfilerImpl final
} // namespace internal
} // namespace v8
-#endif // V8_PROFILER_TRACING_CPU_PROFILER_H
+#endif // V8_PROFILER_TRACING_CPU_PROFILER_H_
diff --git a/deps/v8/src/profiler/unbound-queue.h b/deps/v8/src/profiler/unbound-queue.h
index c53b35a8ed..062f1ce609 100644
--- a/deps/v8/src/profiler/unbound-queue.h
+++ b/deps/v8/src/profiler/unbound-queue.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PROFILER_UNBOUND_QUEUE_
-#define V8_PROFILER_UNBOUND_QUEUE_
+#ifndef V8_PROFILER_UNBOUND_QUEUE_H_
+#define V8_PROFILER_UNBOUND_QUEUE_H_
#include "src/allocation.h"
#include "src/base/atomicops.h"
@@ -45,4 +45,4 @@ class UnboundQueue BASE_EMBEDDED {
} // namespace internal
} // namespace v8
-#endif // V8_PROFILER_UNBOUND_QUEUE_
+#endif // V8_PROFILER_UNBOUND_QUEUE_H_
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index 5c744a1bd7..eccaeb006f 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -7,7 +7,11 @@
#include <iosfwd>
-#include "src/factory.h"
+#include "src/globals.h"
+#include "src/handles.h"
+#include "src/objects.h"
+#include "src/objects/name.h"
+#include "src/property-details.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index 5f77ff4021..d366349640 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -41,7 +41,7 @@ namespace internal {
* - x29/fp : Frame pointer. Used to access arguments, local variables and
* RegExp registers.
* - x16/x17 : IP registers, used by assembler. Very volatile.
- * - csp : Points to tip of C stack.
+ * - sp : Points to tip of C stack.
*
* - x0-x7 : Used as a cache to store 32 bit capture registers. These
* registers need to be retained every time a call to C code
@@ -57,7 +57,7 @@ namespace internal {
* the code)
*
* - fp[96] isolate Address of the current isolate.
- * ^^^ csp when called ^^^
+ * ^^^ sp when called ^^^
* - fp[88] lr Return from the RegExp code.
* - fp[80] r29 Old frame pointer (CalleeSaved).
* - fp[0..72] r19-r28 Backup of CalleeSaved registers.
@@ -77,7 +77,7 @@ namespace internal {
* - ... num_saved_registers_ registers.
* - ...
* - register N + num_registers - 1
- * ^^^^^^^^^ csp ^^^^^^^^^
+ * ^^^^^^^^^ sp ^^^^^^^^^
*
* The first num_saved_registers_ registers are initialized to point to
* "character -1" in the string (i.e., char_size() bytes before the first
@@ -704,9 +704,8 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// x6: Address stack_base
// x7: int direct_call
- // The stack pointer should be csp on entry.
- // csp[8]: address of the current isolate
- // csp[0]: secondary link/return address used by native call
+ // sp[8]: address of the current isolate
+ // sp[0]: secondary link/return address used by native call
// Tell the system that we have a stack frame. Because the type is MANUAL, no
// code is generated.
@@ -719,12 +718,11 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
DCHECK_EQ(11, kCalleeSaved.Count());
registers_to_retain.Combine(lr);
- DCHECK(csp.Is(__ StackPointer()));
__ PushCPURegList(registers_to_retain);
__ PushCPURegList(argument_registers);
// Set frame pointer in place.
- __ Add(frame_pointer(), csp, argument_registers.Count() * kPointerSize);
+ __ Add(frame_pointer(), sp, argument_registers.Count() * kPointerSize);
// Initialize callee-saved registers.
__ Mov(start_offset(), w1);
@@ -755,7 +753,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
ExternalReference::address_of_stack_limit(isolate());
__ Mov(x10, stack_limit);
__ Ldr(x10, MemOperand(x10));
- __ Subs(x10, csp, x10);
+ __ Subs(x10, sp, x10);
// Handle it if the stack pointer is already below the stack limit.
__ B(ls, &stack_limit_hit);
@@ -1015,9 +1013,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
__ Bind(&return_w0);
// Set stack pointer back to first register to retain
- DCHECK(csp.Is(__ StackPointer()));
- __ Mov(csp, fp);
- __ AssertStackConsistency();
+ __ Mov(sp, fp);
// Restore registers.
__ PopCPURegList(registers_to_retain);
@@ -1036,7 +1032,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// The cached registers need to be retained.
__ PushCPURegList(cached_registers);
CallCheckStackGuardState(x10);
- // Returning from the regexp code restores the stack (csp <- fp)
+ // Returning from the regexp code restores the stack (sp <- fp)
// so we don't need to drop the link register from it before exiting.
__ Cbnz(w0, &return_w0);
// Reset the cached registers.
@@ -1059,7 +1055,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
__ CallCFunction(grow_stack, 3);
// If return nullptr, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
- // Returning from the regexp code restores the stack (csp <- fp)
+ // Returning from the regexp code restores the stack (sp <- fp)
// so we don't need to drop the link register from it before exiting.
__ Cbz(w0, &exit_with_exception);
// Otherwise use return value as new stack pointer.
@@ -1366,14 +1362,13 @@ void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
int align_mask = (alignment / kXRegSize) - 1;
int xreg_to_claim = (3 + align_mask) & ~align_mask;
- DCHECK(csp.Is(__ StackPointer()));
__ Claim(xreg_to_claim);
// CheckStackGuardState needs the end and start addresses of the input string.
__ Poke(input_end(), 2 * kPointerSize);
- __ Add(x5, csp, 2 * kPointerSize);
+ __ Add(x5, sp, 2 * kPointerSize);
__ Poke(input_start(), kPointerSize);
- __ Add(x4, csp, kPointerSize);
+ __ Add(x4, sp, kPointerSize);
__ Mov(w3, start_offset());
// RegExp code frame pointer.
@@ -1384,7 +1379,7 @@ void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
// We need to pass a pointer to the return address as first argument.
// The DirectCEntry stub will place the return address on the stack before
// calling so the stack pointer will point to it.
- __ Mov(x0, csp);
+ __ Mov(x0, sp);
ExternalReference check_stack_guard_state =
ExternalReference::re_check_stack_guard_state(isolate());
@@ -1396,7 +1391,6 @@ void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
__ Peek(input_start(), kPointerSize);
__ Peek(input_end(), 2 * kPointerSize);
- DCHECK(csp.Is(__ StackPointer()));
__ Drop(xreg_to_claim);
// Reload the Code pointer.
@@ -1445,8 +1439,7 @@ void RegExpMacroAssemblerARM64::CheckPreemption() {
ExternalReference::address_of_stack_limit(isolate());
__ Mov(x10, stack_limit);
__ Ldr(x10, MemOperand(x10));
- DCHECK(csp.Is(__ StackPointer()));
- __ Cmp(csp, x10);
+ __ Cmp(sp, x10);
CallIf(&check_preempt_label_, ls);
}
@@ -1557,14 +1550,12 @@ void RegExpMacroAssemblerARM64::CallIf(Label* to, Condition condition) {
void RegExpMacroAssemblerARM64::RestoreLinkRegister() {
- DCHECK(csp.Is(__ StackPointer()));
__ Pop(lr, xzr);
__ Add(lr, lr, Operand(masm_->CodeObject()));
}
void RegExpMacroAssemblerARM64::SaveLinkRegister() {
- DCHECK(csp.Is(__ StackPointer()));
__ Sub(lr, lr, Operand(masm_->CodeObject()));
__ Push(xzr, lr);
}
diff --git a/deps/v8/src/regexp/interpreter-irregexp.cc b/deps/v8/src/regexp/interpreter-irregexp.cc
index 7ba028020b..2c1b890c4f 100644
--- a/deps/v8/src/regexp/interpreter-irregexp.cc
+++ b/deps/v8/src/regexp/interpreter-irregexp.cc
@@ -519,6 +519,7 @@ static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
break;
}
BYTECODE(CHECK_NOT_BACK_REF_NO_CASE_UNICODE)
+ V8_FALLTHROUGH;
BYTECODE(CHECK_NOT_BACK_REF_NO_CASE) {
bool unicode =
(insn & BYTECODE_MASK) == BC_CHECK_NOT_BACK_REF_NO_CASE_UNICODE;
@@ -537,6 +538,7 @@ static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
break;
}
BYTECODE(CHECK_NOT_BACK_REF_NO_CASE_UNICODE_BACKWARD)
+ V8_FALLTHROUGH;
BYTECODE(CHECK_NOT_BACK_REF_NO_CASE_BACKWARD) {
bool unicode = (insn & BYTECODE_MASK) ==
BC_CHECK_NOT_BACK_REF_NO_CASE_UNICODE_BACKWARD;
diff --git a/deps/v8/src/regexp/jsregexp.cc b/deps/v8/src/regexp/jsregexp.cc
index a26a1d77ce..b90b0a51a6 100644
--- a/deps/v8/src/regexp/jsregexp.cc
+++ b/deps/v8/src/regexp/jsregexp.cc
@@ -1732,7 +1732,7 @@ static inline bool EmitAtomLetter(Isolate* isolate,
}
case 4:
macro_assembler->CheckCharacter(chars[3], &ok);
- // Fall through!
+ V8_FALLTHROUGH;
case 3:
macro_assembler->CheckCharacter(chars[0], &ok);
macro_assembler->CheckCharacter(chars[1], &ok);
@@ -2768,16 +2768,13 @@ RegExpNode* TextNode::FilterOneByte(int depth) {
Vector<const uc16> quarks = elm.atom()->data();
for (int j = 0; j < quarks.length(); j++) {
uint16_t c = quarks[j];
- if (c <= String::kMaxOneByteCharCode) continue;
- if (!IgnoreCase(elm.atom()->flags())) return set_replacement(nullptr);
- // Here, we need to check for characters whose upper and lower cases
- // are outside the Latin-1 range.
- uint16_t converted = unibrow::Latin1::ConvertNonLatin1ToLatin1(c);
- // Character is outside Latin-1 completely
- if (converted == 0) return set_replacement(nullptr);
- // Convert quark to Latin-1 in place.
- uint16_t* copy = const_cast<uint16_t*>(quarks.start());
- copy[j] = converted;
+ if (elm.atom()->ignore_case()) {
+ c = unibrow::Latin1::TryConvertToLatin1(c);
+ }
+ if (c > unibrow::Latin1::kMaxChar) return set_replacement(nullptr);
+ // Replace quark in case we converted to Latin-1.
+ uint16_t* writable_quarks = const_cast<uint16_t*>(quarks.start());
+ writable_quarks[j] = c;
}
} else {
DCHECK(elm.text_type() == TextElement::CHAR_CLASS);
@@ -3209,10 +3206,17 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
if (first_element_checked && i == 0 && j == 0) continue;
if (DeterminedAlready(quick_check, elm.cp_offset() + j)) continue;
EmitCharacterFunction* emit_function = nullptr;
+ uc16 quark = quarks[j];
+ if (elm.atom()->ignore_case()) {
+ // Everywhere else we assume that a non-Latin-1 character cannot match
+        // a Latin-1 character. Avoid the cases where this assumption is
+ // invalid by using the Latin1 equivalent instead.
+ quark = unibrow::Latin1::TryConvertToLatin1(quark);
+ }
switch (pass) {
case NON_LATIN1_MATCH:
DCHECK(one_byte);
- if (quarks[j] > String::kMaxOneByteCharCode) {
+ if (quark > String::kMaxOneByteCharCode) {
assembler->GoTo(backtrack);
return;
}
@@ -3232,8 +3236,8 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
if (emit_function != nullptr) {
bool bounds_check = *checked_up_to < cp_offset + j || read_backward();
bool bound_checked =
- emit_function(isolate, compiler, quarks[j], backtrack,
- cp_offset + j, bounds_check, preloaded);
+ emit_function(isolate, compiler, quark, backtrack, cp_offset + j,
+ bounds_check, preloaded);
if (bound_checked) UpdateBoundsCheck(cp_offset + j, checked_up_to);
}
}
diff --git a/deps/v8/src/regexp/mips/OWNERS b/deps/v8/src/regexp/mips/OWNERS
index 3fce7dd688..4ce9d7f91d 100644
--- a/deps/v8/src/regexp/mips/OWNERS
+++ b/deps/v8/src/regexp/mips/OWNERS
@@ -1,2 +1,3 @@
ivica.bogosavljevic@mips.com
-Miran.Karic@mips.com
\ No newline at end of file
+Miran.Karic@mips.com
+sreten.kovacevic@mips.com
\ No newline at end of file
diff --git a/deps/v8/src/regexp/mips64/OWNERS b/deps/v8/src/regexp/mips64/OWNERS
index 978563cab5..4ce9d7f91d 100644
--- a/deps/v8/src/regexp/mips64/OWNERS
+++ b/deps/v8/src/regexp/mips64/OWNERS
@@ -1,2 +1,3 @@
ivica.bogosavljevic@mips.com
Miran.Karic@mips.com
+sreten.kovacevic@mips.com
\ No newline at end of file
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
index c0023f409b..37c1d3fbb6 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_REGEXP_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
-#define V8_REGEXP_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
+#ifndef V8_REGEXP_MIPS64_REGEXP_MACRO_ASSEMBLER_MIPS64_H_
+#define V8_REGEXP_MIPS64_REGEXP_MACRO_ASSEMBLER_MIPS64_H_
#include "src/macro-assembler.h"
#include "src/mips64/assembler-mips64.h"
@@ -228,4 +228,4 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
} // namespace internal
} // namespace v8
-#endif // V8_REGEXP_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
+#endif // V8_REGEXP_MIPS64_REGEXP_MACRO_ASSEMBLER_MIPS64_H_
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index a7724c5d42..72ed5b8d69 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -399,8 +399,8 @@ RegExpTree* RegExpParser::ParseDisjunction() {
Advance(2);
break;
}
+ V8_FALLTHROUGH;
}
- // Fall through.
case '0': {
Advance();
if (unicode() && Next() >= '0' && Next() <= '9') {
@@ -493,7 +493,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
ParseNamedBackReference(builder, state CHECK_FAILED);
break;
}
- // Fall through.
+ V8_FALLTHROUGH;
default:
Advance();
// With /u, no identity escapes except for syntax characters
@@ -511,14 +511,14 @@ RegExpTree* RegExpParser::ParseDisjunction() {
int dummy;
bool parsed = ParseIntervalQuantifier(&dummy, &dummy CHECK_FAILED);
if (parsed) return ReportError(CStrVector("Nothing to repeat"));
- // Fall through.
+ V8_FALLTHROUGH;
}
case '}':
case ']':
if (unicode()) {
return ReportError(CStrVector("Lone quantifier brackets"));
}
- // Fall through.
+ V8_FALLTHROUGH;
default:
builder->AddUnicodeCharacter(current());
Advance();
@@ -684,7 +684,7 @@ RegExpParser::RegExpParserState* RegExpParser::ParseOpenParenthesis(
Advance();
break;
}
- // Fall through.
+ V8_FALLTHROUGH;
default:
ReportError(CStrVector("Invalid group"));
return nullptr;
@@ -1515,7 +1515,7 @@ uc32 RegExpParser::ParseClassCharacterEscape() {
Advance();
return 0;
}
- // Fall through.
+ V8_FALLTHROUGH;
case '1':
case '2':
case '3':
@@ -1986,8 +1986,14 @@ bool RegExpBuilder::AddQuantifierToAtom(
} else if (terms_.length() > 0) {
DCHECK(last_added_ == ADD_ATOM);
atom = terms_.RemoveLast();
- // With /u, lookarounds are not quantifiable.
- if (unicode() && atom->IsLookaround()) return false;
+ if (atom->IsLookaround()) {
+ // With /u, lookarounds are not quantifiable.
+ if (unicode()) return false;
+ // Lookbehinds are not quantifiable.
+ if (atom->AsLookaround()->type() == RegExpLookaround::LOOKBEHIND) {
+ return false;
+ }
+ }
if (atom->max_match() == 0) {
// Guaranteed to only match an empty string.
LAST(ADD_TERM);
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
index 94603cd7c9..40ba5ece25 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_S390_REGEXP_MACRO_ASSEMBLER_S390_H_
-#define V8_S390_REGEXP_MACRO_ASSEMBLER_S390_H_
+#ifndef V8_REGEXP_S390_REGEXP_MACRO_ASSEMBLER_S390_H_
+#define V8_REGEXP_S390_REGEXP_MACRO_ASSEMBLER_S390_H_
#include "src/macro-assembler.h"
#include "src/regexp/regexp-macro-assembler.h"
@@ -210,4 +210,4 @@ const RegList kRegExpCalleeSaved =
} // namespace internal
} // namespace v8
-#endif // V8_S390_REGEXP_MACRO_ASSEMBLER_S390_H_
+#endif // V8_REGEXP_S390_REGEXP_MACRO_ASSEMBLER_S390_H_
diff --git a/deps/v8/src/register-configuration.cc b/deps/v8/src/register-configuration.cc
index 6ecc5519f3..6b472a0b4e 100644
--- a/deps/v8/src/register-configuration.cc
+++ b/deps/v8/src/register-configuration.cc
@@ -64,29 +64,6 @@ STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
Simd128Register::kNumRegisters);
-static int get_num_allocatable_general_registers() {
- return
-#if V8_TARGET_ARCH_IA32
- kMaxAllocatableGeneralRegisterCount;
-#elif V8_TARGET_ARCH_X64
- kMaxAllocatableGeneralRegisterCount;
-#elif V8_TARGET_ARCH_ARM
- kMaxAllocatableGeneralRegisterCount;
-#elif V8_TARGET_ARCH_ARM64
- kMaxAllocatableGeneralRegisterCount;
-#elif V8_TARGET_ARCH_MIPS
- kMaxAllocatableGeneralRegisterCount;
-#elif V8_TARGET_ARCH_MIPS64
- kMaxAllocatableGeneralRegisterCount;
-#elif V8_TARGET_ARCH_PPC
- kMaxAllocatableGeneralRegisterCount;
-#elif V8_TARGET_ARCH_S390
- kMaxAllocatableGeneralRegisterCount;
-#else
-#error Unsupported target architecture.
-#endif
-}
-
static int get_num_allocatable_double_registers() {
return
#if V8_TARGET_ARCH_IA32
@@ -127,7 +104,7 @@ class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
ArchDefaultRegisterConfiguration()
: RegisterConfiguration(
Register::kNumRegisters, DoubleRegister::kNumRegisters,
- get_num_allocatable_general_registers(),
+ kMaxAllocatableGeneralRegisterCount,
get_num_allocatable_double_registers(), kAllocatableGeneralCodes,
get_allocatable_double_codes(),
kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE,
@@ -145,6 +122,66 @@ static base::LazyInstance<ArchDefaultRegisterConfiguration,
RegisterConfigurationInitializer>::type
kDefaultRegisterConfiguration = LAZY_INSTANCE_INITIALIZER;
+// Allocatable registers with the masking register removed.
+class ArchDefaultPoisoningRegisterConfiguration : public RegisterConfiguration {
+ public:
+ ArchDefaultPoisoningRegisterConfiguration()
+ : RegisterConfiguration(
+ Register::kNumRegisters, DoubleRegister::kNumRegisters,
+ kMaxAllocatableGeneralRegisterCount - 1,
+ get_num_allocatable_double_registers(),
+ InitializeGeneralRegisterCodes(), get_allocatable_double_codes(),
+ kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE,
+ InitializeGeneralRegisterNames(), kFloatRegisterNames,
+ kDoubleRegisterNames, kSimd128RegisterNames) {}
+
+ private:
+ static char const* const* InitializeGeneralRegisterNames() {
+ int filtered_index = 0;
+ for (int i = 0; i < kMaxAllocatableGeneralRegisterCount; ++i) {
+ if (kAllocatableGeneralCodes[i] != kSpeculationPoisonRegister.code()) {
+ general_register_names_[filtered_index] = kGeneralRegisterNames[i];
+ filtered_index++;
+ }
+ }
+ DCHECK_EQ(filtered_index, kMaxAllocatableGeneralRegisterCount - 1);
+ return general_register_names_;
+ }
+
+ static const int* InitializeGeneralRegisterCodes() {
+ int filtered_index = 0;
+ for (int i = 0; i < kMaxAllocatableGeneralRegisterCount; ++i) {
+ if (kAllocatableGeneralCodes[i] != kSpeculationPoisonRegister.code()) {
+ allocatable_general_codes_[filtered_index] =
+ kAllocatableGeneralCodes[i];
+ filtered_index++;
+ }
+ }
+ DCHECK_EQ(filtered_index, kMaxAllocatableGeneralRegisterCount - 1);
+ return allocatable_general_codes_;
+ }
+
+ static const char*
+ general_register_names_[kMaxAllocatableGeneralRegisterCount - 1];
+ static int
+ allocatable_general_codes_[kMaxAllocatableGeneralRegisterCount - 1];
+};
+
+const char* ArchDefaultPoisoningRegisterConfiguration::general_register_names_
+ [kMaxAllocatableGeneralRegisterCount - 1];
+int ArchDefaultPoisoningRegisterConfiguration::allocatable_general_codes_
+ [kMaxAllocatableGeneralRegisterCount - 1];
+
+struct PoisoningRegisterConfigurationInitializer {
+ static void Construct(void* config) {
+ new (config) ArchDefaultPoisoningRegisterConfiguration();
+ }
+};
+
+static base::LazyInstance<ArchDefaultPoisoningRegisterConfiguration,
+ PoisoningRegisterConfigurationInitializer>::type
+ kDefaultPoisoningRegisterConfiguration = LAZY_INSTANCE_INITIALIZER;
+
 // RestrictedRegisterConfiguration uses the subset of allocatable general
 // registers the architecture supports, which results in assembly that uses
 // fewer registers. Currently, it's only used by the RecordWrite code stub.
@@ -193,6 +230,10 @@ const RegisterConfiguration* RegisterConfiguration::Default() {
return &kDefaultRegisterConfiguration.Get();
}
+const RegisterConfiguration* RegisterConfiguration::Poisoning() {
+ return &kDefaultPoisoningRegisterConfiguration.Get();
+}
+
const RegisterConfiguration* RegisterConfiguration::RestrictGeneralRegisters(
RegList registers) {
int num = NumRegs(registers);
diff --git a/deps/v8/src/register-configuration.h b/deps/v8/src/register-configuration.h
index 59aeab8742..1299baac69 100644
--- a/deps/v8/src/register-configuration.h
+++ b/deps/v8/src/register-configuration.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_REGISTER_CONFIGURATION_H_
-#define V8_COMPILER_REGISTER_CONFIGURATION_H_
+#ifndef V8_REGISTER_CONFIGURATION_H_
+#define V8_REGISTER_CONFIGURATION_H_
#include "src/base/macros.h"
#include "src/globals.h"
@@ -31,6 +31,9 @@ class V8_EXPORT_PRIVATE RegisterConfiguration {
// Default RegisterConfigurations for the target architecture.
static const RegisterConfiguration* Default();
+ // Register configuration with reserved masking register.
+ static const RegisterConfiguration* Poisoning();
+
static const RegisterConfiguration* RestrictGeneralRegisters(
RegList registers);
@@ -165,4 +168,4 @@ class V8_EXPORT_PRIVATE RegisterConfiguration {
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_REGISTER_CONFIGURATION_H_
+#endif // V8_REGISTER_CONFIGURATION_H_
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index f07c842bae..648606a274 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -149,7 +149,8 @@ Object* PrepareElementsForSort(Handle<JSObject> object, uint32_t limit) {
JSObject::ValidateElements(*object);
} else if (object->HasFixedTypedArrayElements()) {
// Typed arrays cannot have holes or undefined elements.
- return Smi::FromInt(FixedArrayBase::cast(object->elements())->length());
+ int array_length = FixedArrayBase::cast(object->elements())->length();
+ return Smi::FromInt(Min(limit, static_cast<uint32_t>(array_length)));
} else if (!object->HasDoubleElements()) {
JSObject::EnsureWritableFastElements(object);
}
@@ -390,7 +391,7 @@ RUNTIME_FUNCTION(Runtime_TrySliceSimpleNonFastElements) {
// implementation.
if (receiver->IsJSArray()) {
// This "fastish" path must make sure the destination array is a JSArray.
- if (!isolate->IsArraySpeciesLookupChainIntact() ||
+ if (!isolate->IsSpeciesLookupChainIntact() ||
!JSArray::cast(*receiver)->HasArrayPrototype(isolate)) {
return Smi::FromInt(0);
}
@@ -532,17 +533,15 @@ RUNTIME_FUNCTION(Runtime_NormalizeElements) {
return *array;
}
-
-// GrowArrayElements returns a sentinel Smi if the object was normalized.
+// GrowArrayElements returns a sentinel Smi if the object was normalized or if
+// the key is negative.
RUNTIME_FUNCTION(Runtime_GrowArrayElements) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_NUMBER_CHECKED(int, key, Int32, args[1]);
- if (key < 0) {
- return object->elements();
- }
+ if (key < 0) return Smi::kZero;
uint32_t capacity = static_cast<uint32_t>(object->elements()->length());
uint32_t index = static_cast<uint32_t>(key);
@@ -553,7 +552,6 @@ RUNTIME_FUNCTION(Runtime_GrowArrayElements) {
}
}
- // On success, return the fixed array elements.
return object->elements();
}
diff --git a/deps/v8/src/runtime/runtime-atomics.cc b/deps/v8/src/runtime/runtime-atomics.cc
index 68a7b413b5..9849c694dc 100644
--- a/deps/v8/src/runtime/runtime-atomics.cc
+++ b/deps/v8/src/runtime/runtime-atomics.cc
@@ -249,30 +249,6 @@ inline Object* DoXor(Isolate* isolate, void* buffer, size_t index,
V(Uint32, uint32, UINT32, uint32_t, 4) \
V(Int32, int32, INT32, int32_t, 4)
-RUNTIME_FUNCTION(Runtime_ThrowNotIntegerSharedTypedArrayError) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError(MessageTemplate::kNotIntegerSharedTypedArray, value));
-}
-
-RUNTIME_FUNCTION(Runtime_ThrowNotInt32SharedTypedArrayError) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kNotInt32SharedTypedArray, value));
-}
-
-RUNTIME_FUNCTION(Runtime_ThrowInvalidAtomicAccessIndexError) {
- HandleScope scope(isolate);
- DCHECK_EQ(0, args.length());
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kInvalidAtomicAccessIndex));
-}
-
RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
diff --git a/deps/v8/src/runtime/runtime-bigint.cc b/deps/v8/src/runtime/runtime-bigint.cc
index 47f644f619..ce0d8990a1 100644
--- a/deps/v8/src/runtime/runtime-bigint.cc
+++ b/deps/v8/src/runtime/runtime-bigint.cc
@@ -75,6 +75,13 @@ RUNTIME_FUNCTION(Runtime_BigIntToNumber) {
return *BigInt::ToNumber(x);
}
+RUNTIME_FUNCTION(Runtime_ToBigInt) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
+ RETURN_RESULT_OR_FAILURE(isolate, BigInt::FromObject(isolate, x));
+}
+
RUNTIME_FUNCTION(Runtime_BigIntBinaryOp) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
diff --git a/deps/v8/src/runtime/runtime-collections.cc b/deps/v8/src/runtime/runtime-collections.cc
index 44e947aafe..efe4f455b1 100644
--- a/deps/v8/src/runtime/runtime-collections.cc
+++ b/deps/v8/src/runtime/runtime-collections.cc
@@ -11,24 +11,22 @@
namespace v8 {
namespace internal {
-RUNTIME_FUNCTION(Runtime_TheHole) {
+RUNTIME_FUNCTION(Runtime_IsJSMapIterator) {
SealHandleScope shs(isolate);
- DCHECK_EQ(0, args.length());
- return isolate->heap()->the_hole_value();
+ DCHECK_EQ(1, args.length());
+ return isolate->heap()->ToBoolean(args[0]->IsJSMapIterator());
}
-RUNTIME_FUNCTION(Runtime_GetExistingHash) {
+RUNTIME_FUNCTION(Runtime_IsJSSetIterator) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- return object->GetHash();
+ return isolate->heap()->ToBoolean(args[0]->IsJSSetIterator());
}
-RUNTIME_FUNCTION(Runtime_GenericHash) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- return object->GetOrCreateHash(isolate);
+RUNTIME_FUNCTION(Runtime_TheHole) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(0, args.length());
+ return isolate->heap()->the_hole_value();
}
RUNTIME_FUNCTION(Runtime_SetGrow) {
@@ -101,15 +99,6 @@ RUNTIME_FUNCTION(Runtime_GetWeakMapEntries) {
return *JSWeakCollection::GetEntries(holder, max_entries);
}
-RUNTIME_FUNCTION(Runtime_WeakCollectionInitialize) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
- JSWeakCollection::Initialize(weak_collection, isolate);
- return *weak_collection;
-}
-
-
RUNTIME_FUNCTION(Runtime_WeakCollectionDelete) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
diff --git a/deps/v8/src/runtime/runtime-date.cc b/deps/v8/src/runtime/runtime-date.cc
index 96292ad1c5..d149af652b 100644
--- a/deps/v8/src/runtime/runtime-date.cc
+++ b/deps/v8/src/runtime/runtime-date.cc
@@ -21,15 +21,6 @@ RUNTIME_FUNCTION(Runtime_IsDate) {
return isolate->heap()->ToBoolean(obj->IsJSDate());
}
-
-RUNTIME_FUNCTION(Runtime_ThrowNotDateError) {
- HandleScope scope(isolate);
- DCHECK_EQ(0, args.length());
- THROW_NEW_ERROR_RETURN_FAILURE(isolate,
- NewTypeError(MessageTemplate::kNotDateObject));
-}
-
-
RUNTIME_FUNCTION(Runtime_DateCurrentTime) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index d6e028b41e..daef53280e 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -43,7 +43,7 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_DebugBreakOnBytecode) {
// Get the top-most JavaScript frame.
JavaScriptFrameIterator it(isolate);
- isolate->debug()->Break(it.frame());
+ isolate->debug()->Break(it.frame(), handle(it.frame()->function()));
// Return the handler from the original bytecode array.
DCHECK(it.frame()->is_interpreted());
@@ -53,21 +53,25 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_DebugBreakOnBytecode) {
BytecodeArray* bytecode_array = shared->bytecode_array();
int bytecode_offset = interpreted_frame->GetBytecodeOffset();
Bytecode bytecode = Bytecodes::FromByte(bytecode_array->get(bytecode_offset));
- if (bytecode == Bytecode::kReturn) {
- // If we are returning, reset the bytecode array on the interpreted stack
- // frame to the non-debug variant so that the interpreter entry trampoline
- // sees the return bytecode rather than the DebugBreak.
+ if (Bytecodes::Returns(bytecode)) {
+ // If we are returning (or suspending), reset the bytecode array on the
+ // interpreted stack frame to the non-debug variant so that the interpreter
+ // entry trampoline sees the return/suspend bytecode rather than the
+ // DebugBreak.
interpreted_frame->PatchBytecodeArray(bytecode_array);
}
// We do not have to deal with operand scale here. If the bytecode at the
// break is prefixed by operand scaling, we would have patched over the
// scaling prefix. We now simply dispatch to the handler for the prefix.
+ // We need to deserialize now to ensure we don't hit the debug break again
+ // after deserializing.
OperandScale operand_scale = OperandScale::kSingle;
- Code* code = isolate->interpreter()->GetAndMaybeDeserializeBytecodeHandler(
- bytecode, operand_scale);
+ isolate->interpreter()->GetAndMaybeDeserializeBytecodeHandler(bytecode,
+ operand_scale);
- return MakePair(isolate->debug()->return_value(), code);
+ return MakePair(isolate->debug()->return_value(),
+ Smi::FromInt(static_cast<uint8_t>(bytecode)));
}
@@ -81,27 +85,6 @@ RUNTIME_FUNCTION(Runtime_HandleDebuggerStatement) {
}
-// Adds a JavaScript function as a debug event listener.
-// args[0]: debug event listener function to set or null or undefined for
-// clearing the event listener function
-// args[1]: object supplied during callback
-RUNTIME_FUNCTION(Runtime_SetDebugEventListener) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(2, args.length());
- CHECK(args[0]->IsJSFunction() || args[0]->IsNullOrUndefined(isolate));
- CONVERT_ARG_HANDLE_CHECKED(Object, callback, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, data, 1);
- if (callback->IsJSFunction()) {
- JavaScriptDebugDelegate* delegate = new JavaScriptDebugDelegate(
- isolate, Handle<JSFunction>::cast(callback), data);
- isolate->debug()->SetDebugDelegate(delegate, true);
- } else {
- isolate->debug()->SetDebugDelegate(nullptr, false);
- }
- return isolate->heap()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(Runtime_ScheduleBreak) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
@@ -261,7 +244,10 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
Handle<String> status_str = factory->NewStringFromAsciiChecked(status);
result->set(1, *status_str);
- Handle<Object> value_obj(promise->result(), isolate);
+ Handle<Object> value_obj(promise->status() == Promise::kPending
+ ? isolate->heap()->undefined_value()
+ : promise->result(),
+ isolate);
Handle<String> promise_value =
factory->NewStringFromAsciiChecked("[[PromiseValue]]");
result->set(2, *promise_value);
@@ -855,8 +841,7 @@ RUNTIME_FUNCTION(Runtime_GetAllScopesDetails) {
// local).
if (frame->is_wasm_interpreter_entry()) {
Handle<WasmDebugInfo> debug_info(
- WasmInterpreterEntryFrame::cast(frame)->wasm_instance()->debug_info(),
- isolate);
+ WasmInterpreterEntryFrame::cast(frame)->debug_info(), isolate);
return *WasmDebugInfo::GetScopeDetails(debug_info, frame->fp(),
inlined_frame_index);
}
@@ -1036,36 +1021,6 @@ RUNTIME_FUNCTION(Runtime_SetScopeVariableValue) {
}
-RUNTIME_FUNCTION(Runtime_DebugPrintScopes) {
- HandleScope scope(isolate);
- DCHECK_EQ(0, args.length());
-
-#ifdef DEBUG
- // Print the scopes for the top frame.
- JavaScriptFrameIterator it(isolate);
- if (!it.done()) {
- JavaScriptFrame* frame = it.frame();
- FrameInspector frame_inspector(frame, 0, isolate);
- for (ScopeIterator si(isolate, &frame_inspector); !si.Done(); si.Next()) {
- si.DebugPrint();
- }
- }
-#endif
- return isolate->heap()->undefined_value();
-}
-
-
-// Sets the disable break state
-// args[0]: disable break state
-RUNTIME_FUNCTION(Runtime_SetBreakPointsActive) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_BOOLEAN_ARG_CHECKED(active, 0);
- isolate->debug()->set_break_points_active(active);
- return isolate->heap()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(Runtime_GetBreakLocations) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -1084,71 +1039,6 @@ RUNTIME_FUNCTION(Runtime_GetBreakLocations) {
}
-// Set a break point in a function.
-// args[0]: function
-// args[1]: number: break source position (within the function source)
-// args[2]: number: break point object
-RUNTIME_FUNCTION(Runtime_SetFunctionBreakPoint) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- CHECK(isolate->debug()->is_active());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
- CHECK(source_position >= function->shared()->start_position() &&
- source_position <= function->shared()->end_position());
- CONVERT_ARG_HANDLE_CHECKED(Object, break_point_object_arg, 2);
-
- // Set break point.
- CHECK(isolate->debug()->SetBreakPoint(function, break_point_object_arg,
- &source_position));
-
- return Smi::FromInt(source_position);
-}
-
-// Changes the state of a break point in a script and returns source position
-// where break point was set. NOTE: Regarding performance see the NOTE for
-// GetScriptFromScriptData.
-// args[0]: script to set break point in
-// args[1]: number: break source position (within the script source)
-// args[2]: number: break point object
-RUNTIME_FUNCTION(Runtime_SetScriptBreakPoint) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- CHECK(isolate->debug()->is_active());
- CONVERT_ARG_HANDLE_CHECKED(JSValue, wrapper, 0);
- CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
- CHECK_GE(source_position, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, break_point_object_arg, 2);
-
- // Get the script from the script wrapper.
- CHECK(wrapper->value()->IsScript());
- Handle<Script> script(Script::cast(wrapper->value()));
-
- // Set break point.
- if (!isolate->debug()->SetBreakPointForScript(script, break_point_object_arg,
- &source_position)) {
- return isolate->heap()->undefined_value();
- }
-
- return Smi::FromInt(source_position);
-}
-
-
-// Clear a break point
-// args[0]: number: break point object
-RUNTIME_FUNCTION(Runtime_ClearBreakPoint) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CHECK(isolate->debug()->is_active());
- CONVERT_ARG_HANDLE_CHECKED(Object, break_point_object_arg, 0);
-
- // Clear break point.
- isolate->debug()->ClearBreakPoint(break_point_object_arg);
-
- return isolate->heap()->undefined_value();
-}
-
-
// Change the state of break on exceptions.
// args[0]: Enum value indicating whether to affect caught/uncaught exceptions.
// args[1]: Boolean indicating on/off.
@@ -1572,46 +1462,6 @@ int ScriptLinePosition(Handle<Script> script, int line) {
} // namespace
-// TODO(5530): Remove once uses in debug.js are gone.
-RUNTIME_FUNCTION(Runtime_ScriptLineStartPosition) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_CHECKED(JSValue, script, 0);
- CONVERT_NUMBER_CHECKED(int32_t, line, Int32, args[1]);
-
- CHECK(script->value()->IsScript());
- Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
-
- return Smi::FromInt(ScriptLinePosition(script_handle, line));
-}
-
-// TODO(5530): Remove once uses in debug.js are gone.
-RUNTIME_FUNCTION(Runtime_ScriptLineEndPosition) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_CHECKED(JSValue, script, 0);
- CONVERT_NUMBER_CHECKED(int32_t, line, Int32, args[1]);
-
- CHECK(script->value()->IsScript());
- Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
-
- if (script_handle->type() == Script::TYPE_WASM) {
- // Return zero for now; this function will disappear soon anyway.
- return Smi::FromInt(0);
- }
-
- Script::InitLineEnds(script_handle);
-
- FixedArray* line_ends_array = FixedArray::cast(script_handle->line_ends());
- const int line_count = line_ends_array->length();
-
- if (line < 0 || line >= line_count) {
- return Smi::FromInt(-1);
- } else {
- return Smi::cast(line_ends_array->get(line));
- }
-}
-
static Handle<Object> GetJSPositionInfo(Handle<Script> script, int position,
Script::OffsetFlag offset_flag,
Isolate* isolate) {
@@ -1774,56 +1624,26 @@ RUNTIME_FUNCTION(Runtime_ScriptPositionInfo2) {
return *GetJSPositionInfo(script, position, offset_flag, isolate);
}
-// Returns the given line as a string, or null if line is out of bounds.
-// The parameter line is expected to include the script's line offset.
-// TODO(5530): Remove once uses in debug.js are gone.
-RUNTIME_FUNCTION(Runtime_ScriptSourceLine) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_CHECKED(JSValue, script, 0);
- CONVERT_NUMBER_CHECKED(int32_t, line, Int32, args[1]);
-
- CHECK(script->value()->IsScript());
- Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
-
- if (script_handle->type() == Script::TYPE_WASM) {
- // Return null for now; this function will disappear soon anyway.
- return isolate->heap()->null_value();
- }
-
- Script::InitLineEnds(script_handle);
-
- FixedArray* line_ends_array = FixedArray::cast(script_handle->line_ends());
- const int line_count = line_ends_array->length();
-
- line -= script_handle->line_offset();
- if (line < 0 || line_count <= line) {
- return isolate->heap()->null_value();
- }
-
- const int start =
- (line == 0) ? 0 : Smi::ToInt(line_ends_array->get(line - 1)) + 1;
- const int end = Smi::ToInt(line_ends_array->get(line));
-
- Handle<String> source =
- handle(String::cast(script_handle->source()), isolate);
- Handle<String> str = isolate->factory()->NewSubString(source, start, end);
-
- return *str;
-}
-
// On function call, depending on circumstances, prepare for stepping in,
// or perform a side effect check.
RUNTIME_FUNCTION(Runtime_DebugOnFunctionCall) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
- if (isolate->debug()->last_step_action() >= StepIn) {
- isolate->debug()->PrepareStepIn(fun);
+ if (isolate->debug()->needs_check_on_function_call()) {
+ // Ensure that the callee will perform debug check on function call too.
+ Deoptimizer::DeoptimizeFunction(*fun);
+ if (isolate->debug()->last_step_action() >= StepIn) {
+ isolate->debug()->PrepareStepIn(fun);
+ }
+ if (isolate->needs_side_effect_check() &&
+ !isolate->debug()->PerformSideEffectCheck(fun)) {
+ return isolate->heap()->exception();
+ }
}
- if (isolate->needs_side_effect_check() &&
- !isolate->debug()->PerformSideEffectCheck(fun)) {
- return isolate->heap()->exception();
+ if (fun->shared()->HasDebugInfo() &&
+ fun->shared()->GetDebugInfo()->BreakAtEntry()) {
+ isolate->debug()->Break(nullptr, fun);
}
return isolate->heap()->undefined_value();
}
@@ -1836,15 +1656,6 @@ RUNTIME_FUNCTION(Runtime_DebugPrepareStepInSuspendedGenerator) {
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(Runtime_DebugRecordGenerator) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
- CHECK(isolate->debug()->last_step_action() >= StepNext);
- isolate->debug()->RecordGenerator(generator);
- return isolate->heap()->undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_DebugPushPromise) {
DCHECK_EQ(1, args.length());
HandleScope scope(isolate);
@@ -1876,26 +1687,11 @@ RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionPromiseCreated) {
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(Runtime_DebugPromiseReject) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, rejected_promise, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
-
- isolate->debug()->OnPromiseReject(rejected_promise, value);
- return isolate->heap()->undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_DebugIsActive) {
SealHandleScope shs(isolate);
return Smi::FromInt(isolate->debug()->is_active());
}
-RUNTIME_FUNCTION(Runtime_DebugBreakInOptimizedCode) {
- UNIMPLEMENTED();
- return nullptr;
-}
-
namespace {
Handle<JSObject> MakeRangeObject(Isolate* isolate, const CoverageBlock& range) {
Factory* factory = isolate->factory();
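
Note on the runtime-debug.cc hunks above: Runtime_DebugBreakOnBytecode no longer hands back the handler Code*; it still forces deserialization of the handler, but returns the current bytecode encoded as a Smi, presumably so the debug-break trampoline can re-derive the dispatch target itself. A minimal standalone sketch of that "return the opcode, let the caller re-dispatch" pattern (illustrative C++ only, not V8 code; the enum, table and function names are made up):

#include <cstdint>
#include <cstdio>
#include <utility>

enum class Bytecode : uint8_t { kLdar, kStar, kReturn };

using Handler = void (*)();
static void HandleLdar() { std::puts("ldar"); }
static void HandleStar() { std::puts("star"); }
static void HandleReturn() { std::puts("return"); }

// The caller keeps its own dispatch table and looks the handler up again.
static Handler kDispatchTable[] = {HandleLdar, HandleStar, HandleReturn};

// The "runtime call": returns the resumed value plus the opcode to re-dispatch.
std::pair<int, Bytecode> DebugBreakOnBytecode(int return_value, Bytecode b) {
  return {return_value, b};
}

int main() {
  auto [value, bytecode] = DebugBreakOnBytecode(42, Bytecode::kReturn);
  kDispatchTable[static_cast<uint8_t>(bytecode)]();  // caller re-dispatches
  std::printf("value = %d\n", value);
}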
diff --git a/deps/v8/src/runtime/runtime-error.cc b/deps/v8/src/runtime/runtime-error.cc
index 6ded550d04..7cd98f223b 100644
--- a/deps/v8/src/runtime/runtime-error.cc
+++ b/deps/v8/src/runtime/runtime-error.cc
@@ -20,5 +20,11 @@ RUNTIME_FUNCTION(Runtime_ErrorToString) {
RETURN_RESULT_OR_FAILURE(isolate, ErrorUtils::ToString(isolate, recv));
}
+RUNTIME_FUNCTION(Runtime_IsJSError) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ return isolate->heap()->ToBoolean(args[0]->IsJSError());
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index e9433d2041..a9eddef644 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -88,17 +88,6 @@ RUNTIME_FUNCTION(Runtime_FunctionGetContextData) {
return fun->native_context()->debug_context_id();
}
-RUNTIME_FUNCTION(Runtime_FunctionSetLength) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(2, args.length());
-
- CONVERT_ARG_CHECKED(JSFunction, fun, 0);
- CONVERT_SMI_ARG_CHECKED(length, 1);
- fun->shared()->set_length(length);
- return isolate->heap()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(Runtime_FunctionIsAPIFunction) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
@@ -156,10 +145,10 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
Handle<Context> context(source->context());
target->set_context(*context);
- // Make sure we get a fresh copy of the literal vector to avoid cross
- // context contamination, and that the literal vector makes it's way into
+ // Make sure we get a fresh copy of the feedback vector to avoid cross
+ // context contamination, and that the feedback vector makes its way into
// the target_shared optimized code map.
- JSFunction::EnsureLiterals(target);
+ JSFunction::EnsureFeedbackVector(target);
if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
isolate->logger()->LogExistingFunction(
diff --git a/deps/v8/src/runtime/runtime-generator.cc b/deps/v8/src/runtime/runtime-generator.cc
index 9323d236bc..a7d14b839e 100644
--- a/deps/v8/src/runtime/runtime-generator.cc
+++ b/deps/v8/src/runtime/runtime-generator.cc
@@ -11,6 +11,12 @@
namespace v8 {
namespace internal {
+RUNTIME_FUNCTION(Runtime_IsJSGeneratorObject) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ return isolate->heap()->ToBoolean(args[0]->IsJSGeneratorObject());
+}
+
RUNTIME_FUNCTION(Runtime_CreateJSGeneratorObject) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -30,6 +36,9 @@ RUNTIME_FUNCTION(Runtime_CreateJSGeneratorObject) {
generator->set_receiver(*receiver);
generator->set_register_file(*register_file);
generator->set_continuation(JSGeneratorObject::kGeneratorExecuting);
+ if (generator->IsJSAsyncGeneratorObject()) {
+ Handle<JSAsyncGeneratorObject>::cast(generator)->set_is_awaiting(0);
+ }
return *generator;
}
@@ -55,13 +64,31 @@ RUNTIME_FUNCTION(Runtime_GeneratorGetReceiver) {
return generator->receiver();
}
-RUNTIME_FUNCTION(Runtime_GeneratorGetContext) {
+RUNTIME_FUNCTION(Runtime_GeneratorGetInputOrDebugPos) {
// Runtime call is implemented in InterpreterIntrinsics and lowered in
// JSIntrinsicLowering
UNREACHABLE();
}
-RUNTIME_FUNCTION(Runtime_GeneratorGetInputOrDebugPos) {
+RUNTIME_FUNCTION(Runtime_AsyncFunctionAwaitCaught) {
+ // Runtime call is implemented in InterpreterIntrinsics and lowered in
+ // JSIntrinsicLowering
+ UNREACHABLE();
+}
+
+RUNTIME_FUNCTION(Runtime_AsyncFunctionAwaitUncaught) {
+ // Runtime call is implemented in InterpreterIntrinsics and lowered in
+ // JSIntrinsicLowering
+ UNREACHABLE();
+}
+
+RUNTIME_FUNCTION(Runtime_AsyncGeneratorAwaitCaught) {
+ // Runtime call is implemented in InterpreterIntrinsics and lowered in
+ // JSIntrinsicLowering
+ UNREACHABLE();
+}
+
+RUNTIME_FUNCTION(Runtime_AsyncGeneratorAwaitUncaught) {
// Runtime call is implemented in InterpreterIntrinsics and lowered in
// JSIntrinsicLowering
UNREACHABLE();
@@ -126,12 +153,11 @@ RUNTIME_FUNCTION(Runtime_AsyncGeneratorHasCatchHandlerForPC) {
SharedFunctionInfo* shared = generator->function()->shared();
DCHECK(shared->HasBytecodeArray());
- HandlerTable* handler_table =
- HandlerTable::cast(shared->bytecode_array()->handler_table());
+ HandlerTable handler_table(shared->bytecode_array());
int pc = Smi::cast(generator->input_or_debug_pos())->value();
HandlerTable::CatchPrediction catch_prediction = HandlerTable::ASYNC_AWAIT;
- handler_table->LookupRange(pc, nullptr, &catch_prediction);
+ handler_table.LookupRange(pc, nullptr, &catch_prediction);
return isolate->heap()->ToBoolean(catch_prediction == HandlerTable::CAUGHT);
}
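
The hunk above replaces the heap-allocated HandlerTable::cast(...) lookup with a HandlerTable constructed directly over the bytecode array, then asks LookupRange for the catch prediction at the generator's pc. A standalone sketch of what a range-based handler lookup conceptually does (illustrative C++ only, not V8 code; the outermost-to-innermost table ordering is an assumption for the example):

#include <cstdio>
#include <vector>

enum class CatchPrediction { kUncaught, kCaught, kAsyncAwait };

struct HandlerRange {
  int start;                   // first covered bytecode offset
  int end;                     // one past the last covered offset
  CatchPrediction prediction;  // how a throw inside the range is handled
};

CatchPrediction LookupRange(const std::vector<HandlerRange>& table, int pc) {
  CatchPrediction result = CatchPrediction::kUncaught;
  for (const HandlerRange& r : table) {
    // Later (inner) entries overwrite earlier (outer) ones.
    if (pc >= r.start && pc < r.end) result = r.prediction;
  }
  return result;
}

int main() {
  std::vector<HandlerRange> table = {
      {0, 100, CatchPrediction::kCaught},
      {10, 20, CatchPrediction::kAsyncAwait},
  };
  std::printf("pc=15 caught? %d\n",
              LookupRange(table, 15) == CatchPrediction::kCaught);
}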
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index f9e9375543..a24ded7e21 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -30,6 +30,12 @@ RUNTIME_FUNCTION(Runtime_CheckIsBootstrapping) {
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_IsScriptWrapper) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ return isolate->heap()->ToBoolean(args[0]->IsScriptWrapper());
+}
+
RUNTIME_FUNCTION(Runtime_ExportFromRuntime) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -206,30 +212,6 @@ RUNTIME_FUNCTION(Runtime_NewSyntaxError) {
return *isolate->factory()->NewSyntaxError(message_template, arg0);
}
-RUNTIME_FUNCTION(Runtime_ThrowCannotConvertToPrimitive) {
- HandleScope scope(isolate);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCannotConvertToPrimitive));
-}
-
-RUNTIME_FUNCTION(Runtime_ThrowIncompatibleMethodReceiver) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, arg0, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, arg1, 1);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError(MessageTemplate::kIncompatibleMethodReceiver, arg0, arg1));
-}
-
-RUNTIME_FUNCTION(Runtime_ThrowInvalidHint) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, hint, 0);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kInvalidHint, hint));
-}
-
RUNTIME_FUNCTION(Runtime_ThrowInvalidStringLength) {
HandleScope scope(isolate);
THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewInvalidStringLengthError());
@@ -258,18 +240,6 @@ RUNTIME_FUNCTION(Runtime_ThrowSymbolIteratorInvalid) {
isolate, NewTypeError(MessageTemplate::kSymbolIteratorInvalid));
}
-RUNTIME_FUNCTION(Runtime_ThrowNonCallableInInstanceOfCheck) {
- HandleScope scope(isolate);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kNonCallableInInstanceOfCheck));
-}
-
-RUNTIME_FUNCTION(Runtime_ThrowNonObjectInInstanceOfCheck) {
- HandleScope scope(isolate);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kNonObjectInInstanceOfCheck));
-}
-
RUNTIME_FUNCTION(Runtime_ThrowNotConstructor) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -278,13 +248,6 @@ RUNTIME_FUNCTION(Runtime_ThrowNotConstructor) {
isolate, NewTypeError(MessageTemplate::kNotConstructor, object));
}
-RUNTIME_FUNCTION(Runtime_ThrowGeneratorRunning) {
- HandleScope scope(isolate);
- DCHECK_EQ(0, args.length());
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kGeneratorRunning));
-}
-
RUNTIME_FUNCTION(Runtime_ThrowApplyNonFunction) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -454,14 +417,6 @@ RUNTIME_FUNCTION(Runtime_ThrowCalledNonCallable) {
THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewTypeError(id, callsite));
}
-RUNTIME_FUNCTION(Runtime_ThrowCalledOnNullOrUndefined) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined, name));
-}
-
RUNTIME_FUNCTION(Runtime_ThrowConstructedNonConstructable) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -486,14 +441,6 @@ RUNTIME_FUNCTION(Runtime_ThrowConstructorReturnedNonObject) {
NewTypeError(MessageTemplate::kDerivedConstructorReturnedNonObject));
}
-RUNTIME_FUNCTION(Runtime_ThrowUndefinedOrNullToObject) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kUndefinedOrNullToObject, name));
-}
-
// ES6 section 7.3.17 CreateListFromArrayLike (obj)
RUNTIME_FUNCTION(Runtime_CreateListFromArrayLike) {
HandleScope scope(isolate);
@@ -526,10 +473,6 @@ RUNTIME_FUNCTION(Runtime_DeserializeLazy) {
DCHECK(Builtins::IsLazy(builtin_id));
DCHECK_EQ(Builtins::TFJ, Builtins::KindOf(builtin_id));
- if (FLAG_trace_lazy_deserialization) {
- PrintF("Lazy-deserializing builtin %s\n", Builtins::name(builtin_id));
- }
-
Code* code = Snapshot::DeserializeBuiltin(isolate, builtin_id);
DCHECK_EQ(builtin_id, code->builtin_index());
DCHECK_EQ(code, isolate->builtins()->builtin(builtin_id));
@@ -638,17 +581,21 @@ RUNTIME_FUNCTION(Runtime_CreateAsyncFromSyncIterator) {
isolate, NewTypeError(MessageTemplate::kSymbolIteratorInvalid));
}
+ Handle<Object> next;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, next,
+ Object::GetProperty(sync_iterator, isolate->factory()->next_string()));
+
return *isolate->factory()->NewJSAsyncFromSyncIterator(
- Handle<JSReceiver>::cast(sync_iterator));
+ Handle<JSReceiver>::cast(sync_iterator), next);
}
-RUNTIME_FUNCTION(Runtime_GetTemplateObject) {
+RUNTIME_FUNCTION(Runtime_CreateTemplateObject) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(TemplateObjectDescription, description, 0);
- return *TemplateObjectDescription::GetTemplateObject(
- description, isolate->native_context());
+ return *TemplateObjectDescription::CreateTemplateObject(description);
}
RUNTIME_FUNCTION(Runtime_ReportMessage) {
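
The Runtime_CreateAsyncFromSyncIterator change above fetches the sync iterator's "next" method once, when the wrapper is created, rather than re-reading the property on every step. A minimal standalone sketch of that eager-caching pattern (illustrative C++ only, not V8 code; the struct and helper names are made up):

#include <functional>
#include <iostream>
#include <map>
#include <string>

struct SyncIterator {
  std::map<std::string, std::function<int()>> props;  // property bag
};

struct AsyncFromSyncIterator {
  SyncIterator* sync;
  std::function<int()> next;  // looked up once, at construction time
};

AsyncFromSyncIterator MakeAsyncFromSync(SyncIterator* it) {
  return {it, it->props.at("next")};  // eager lookup; a missing "next" throws here
}

int main() {
  int i = 0;
  SyncIterator sync{{{"next", [&] { return i++; }}}};
  AsyncFromSyncIterator wrapper = MakeAsyncFromSync(&sync);
  sync.props["next"] = [] { return -1; };  // later mutation is not observed
  std::cout << wrapper.next() << '\n';     // prints 0
  std::cout << wrapper.next() << '\n';     // prints 1, still the cached method
}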
diff --git a/deps/v8/src/runtime/runtime-interpreter.cc b/deps/v8/src/runtime/runtime-interpreter.cc
index b65a2327a3..836bf4d5f6 100644
--- a/deps/v8/src/runtime/runtime-interpreter.cc
+++ b/deps/v8/src/runtime/runtime-interpreter.cc
@@ -41,21 +41,6 @@ RUNTIME_FUNCTION(Runtime_InterpreterDeserializeLazy) {
bytecode, operand_scale);
}
-RUNTIME_FUNCTION(Runtime_InterpreterNewClosure) {
- HandleScope scope(isolate);
- DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
- CONVERT_ARG_HANDLE_CHECKED(FeedbackVector, vector, 1);
- CONVERT_SMI_ARG_CHECKED(index, 2);
- CONVERT_SMI_ARG_CHECKED(pretenured_flag, 3);
- Handle<Context> context(isolate->context(), isolate);
- FeedbackSlot slot = FeedbackVector::ToSlot(index);
- Handle<Cell> vector_cell(Cell::cast(vector->Get(slot)), isolate);
- return *isolate->factory()->NewFunctionFromSharedFunctionInfo(
- shared, context, vector_cell,
- static_cast<PretenureFlag>(pretenured_flag));
-}
-
#ifdef V8_TRACE_IGNITION
namespace {
diff --git a/deps/v8/src/runtime/runtime-intl.cc b/deps/v8/src/runtime/runtime-intl.cc
index aaa6034e80..a0e0db8cd0 100644
--- a/deps/v8/src/runtime/runtime-intl.cc
+++ b/deps/v8/src/runtime/runtime-intl.cc
@@ -15,6 +15,7 @@
#include "src/api.h"
#include "src/arguments.h"
#include "src/factory.h"
+#include "src/global-handles.h"
#include "src/intl.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
@@ -626,7 +627,8 @@ RUNTIME_FUNCTION(Runtime_PluralRulesSelect) {
icu::UnicodeString result = plural_rules->select(rounded);
return *isolate->factory()
->NewStringFromTwoByte(Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(result.getBuffer()),
+ reinterpret_cast<const uint16_t*>(
+ icu::toUCharPtr(result.getBuffer())),
result.length()))
.ToHandleChecked();
}
diff --git a/deps/v8/src/runtime/runtime-module.cc b/deps/v8/src/runtime/runtime-module.cc
index a9fb48f887..a758050306 100644
--- a/deps/v8/src/runtime/runtime-module.cc
+++ b/deps/v8/src/runtime/runtime-module.cc
@@ -37,24 +37,6 @@ RUNTIME_FUNCTION(Runtime_GetModuleNamespace) {
return *Module::GetModuleNamespace(module, module_request);
}
-RUNTIME_FUNCTION(Runtime_LoadModuleVariable) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_SMI_ARG_CHECKED(index, 0);
- Handle<Module> module(isolate->context()->module());
- return *Module::LoadVariable(module, index);
-}
-
-RUNTIME_FUNCTION(Runtime_StoreModuleVariable) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_SMI_ARG_CHECKED(index, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
- Handle<Module> module(isolate->context()->module());
- Module::StoreVariable(module, index, value);
- return isolate->heap()->undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_GetImportMetaObject) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 057ead9407..90dddab211 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -34,6 +34,14 @@ MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
MaybeHandle<Object> result = Object::GetProperty(&it);
if (is_found_out) *is_found_out = it.IsFound();
+
+ if (!it.IsFound() && key->IsSymbol() &&
+ Symbol::cast(*key)->is_private_field()) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kInvalidPrivateFieldAccess, key, object),
+ Object);
+ }
return result;
}
@@ -390,6 +398,14 @@ MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
LookupIterator::PropertyOrElement(isolate, object, key, &success);
if (!success) return MaybeHandle<Object>();
+ if (!it.IsFound() && key->IsSymbol() &&
+ Symbol::cast(*key)->is_private_field()) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kInvalidPrivateFieldAccess, key, object),
+ Object);
+ }
+
MAYBE_RETURN_NULL(Object::SetProperty(&it, value, language_mode,
Object::MAY_BE_STORE_FROM_KEYED));
return value;
@@ -439,6 +455,61 @@ RUNTIME_FUNCTION(Runtime_OptimizeObjectForAddingMultipleProperties) {
return *object;
}
+RUNTIME_FUNCTION(Runtime_ObjectValues) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+
+ Handle<FixedArray> values;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, values,
+ JSReceiver::GetOwnValues(receiver, PropertyFilter::ENUMERABLE_STRINGS,
+ true));
+ return *isolate->factory()->NewJSArrayWithElements(values);
+}
+
+RUNTIME_FUNCTION(Runtime_ObjectValuesSkipFastPath) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+
+ Handle<FixedArray> value;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, value,
+ JSReceiver::GetOwnValues(receiver, PropertyFilter::ENUMERABLE_STRINGS,
+ false));
+ return *isolate->factory()->NewJSArrayWithElements(value);
+}
+
+RUNTIME_FUNCTION(Runtime_ObjectEntries) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+
+ Handle<FixedArray> entries;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, entries,
+ JSReceiver::GetOwnEntries(receiver, PropertyFilter::ENUMERABLE_STRINGS,
+ true));
+ return *isolate->factory()->NewJSArrayWithElements(entries);
+}
+
+RUNTIME_FUNCTION(Runtime_ObjectEntriesSkipFastPath) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+
+ Handle<FixedArray> entries;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, entries,
+ JSReceiver::GetOwnEntries(receiver, PropertyFilter::ENUMERABLE_STRINGS,
+ false));
+ return *isolate->factory()->NewJSArrayWithElements(entries);
+}
RUNTIME_FUNCTION(Runtime_GetProperty) {
HandleScope scope(isolate);
@@ -687,26 +758,6 @@ RUNTIME_FUNCTION(Runtime_CompleteInobjectSlackTrackingForMap) {
}
-RUNTIME_FUNCTION(Runtime_LoadMutableDouble) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Smi, index, 1);
- CHECK_EQ(index->value() & 1, 1);
- FieldIndex field_index =
- FieldIndex::ForLoadByFieldIndex(object->map(), index->value());
- if (field_index.is_inobject()) {
- CHECK(field_index.property_index() <
- object->map()->GetInObjectProperties());
- } else {
- CHECK(field_index.outobject_array_index() <
- object->property_dictionary()->length());
- }
- return *JSObject::FastPropertyAt(object, Representation::Double(),
- field_index);
-}
-
-
RUNTIME_FUNCTION(Runtime_TryMigrateInstance) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -724,13 +775,6 @@ RUNTIME_FUNCTION(Runtime_TryMigrateInstance) {
}
-RUNTIME_FUNCTION(Runtime_IsJSGlobalProxy) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Object, obj, 0);
- return isolate->heap()->ToBoolean(obj->IsJSGlobalProxy());
-}
-
static bool IsValidAccessor(Isolate* isolate, Handle<Object> obj) {
return obj->IsNullOrUndefined(isolate) || obj->IsCallable();
}
@@ -770,10 +814,11 @@ RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
CONVERT_ARG_HANDLE_CHECKED(FeedbackVector, vector, 4);
CONVERT_SMI_ARG_CHECKED(index, 5);
- StoreDataPropertyInLiteralICNexus nexus(vector, vector->ToSlot(index));
+ FeedbackNexus nexus(vector, FeedbackVector::ToSlot(index));
if (nexus.ic_state() == UNINITIALIZED) {
if (name->IsUniqueName()) {
- nexus.ConfigureMonomorphic(name, handle(object->map()));
+ nexus.ConfigureMonomorphic(name, handle(object->map()),
+ Handle<Code>::null());
} else {
nexus.ConfigureMegamorphic(PROPERTY);
}
@@ -833,31 +878,12 @@ RUNTIME_FUNCTION(Runtime_CollectTypeProfile) {
}
DCHECK(vector->metadata()->HasTypeProfileSlot());
- CollectTypeProfileNexus nexus(vector, vector->GetTypeProfileSlot());
+ FeedbackNexus nexus(vector, vector->GetTypeProfileSlot());
nexus.Collect(type, position->value());
return isolate->heap()->undefined_value();
}
-// Return property without being observable by accessors or interceptors.
-RUNTIME_FUNCTION(Runtime_GetDataProperty) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
- return *JSReceiver::GetDataProperty(object, name);
-}
-
-RUNTIME_FUNCTION(Runtime_GetConstructorName) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
-
- CHECK(!object->IsNullOrUndefined(isolate));
- Handle<JSReceiver> recv = Object::ToObject(isolate, object).ToHandleChecked();
- return *JSReceiver::GetConstructorName(recv);
-}
-
RUNTIME_FUNCTION(Runtime_HasFastPackedElements) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
@@ -1175,9 +1201,13 @@ RUNTIME_FUNCTION(Runtime_CreateDataProperty) {
RUNTIME_FUNCTION(Runtime_IterableToListCanBeElided) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
- if (!obj->IsJSObject()) return isolate->heap()->ToBoolean(false);
+ // If an iterator symbol is added to the Number prototype, we could see a Smi.
+ if (obj->IsSmi()) return isolate->heap()->ToBoolean(false);
+ if (!HeapObject::cast(*obj)->IsJSObject()) {
+ return isolate->heap()->ToBoolean(false);
+ }
// While iteration alone may not have observable side-effects, calling
// toNumber on an object will. Make sure the arg is not an array of objects.
@@ -1203,5 +1233,27 @@ RUNTIME_FUNCTION(Runtime_GetOwnPropertyDescriptor) {
return *desc.ToPropertyDescriptorObject(isolate);
}
+RUNTIME_FUNCTION(Runtime_AddPrivateField) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, o, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Symbol, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ DCHECK(key->is_private_field());
+
+ LookupIterator it =
+ LookupIterator::PropertyOrElement(isolate, o, key, LookupIterator::OWN);
+
+ if (it.IsFound()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kVarRedeclaration, key));
+ }
+
+ CHECK(Object::AddDataProperty(&it, value, NONE, kDontThrow,
+ Object::MAY_BE_STORE_FROM_KEYED)
+ .FromJust());
+ return isolate->heap()->undefined_value();
+}
+
} // namespace internal
} // namespace v8
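
Taken together, the runtime-object.cc hunks above give private field symbols their access rules: reading or writing a private field that was never added to the receiver throws a TypeError, and Runtime_AddPrivateField refuses to redeclare an existing field. A standalone sketch of those rules (illustrative C++ only, not V8 code; the class and key names are made up):

#include <map>
#include <stdexcept>
#include <string>

class PrivateFields {
 public:
  void Add(const std::string& key, int value) {
    // Mirrors AddPrivateField: adding twice is a redeclaration error.
    if (fields_.count(key)) throw std::runtime_error("redeclaration of " + key);
    fields_[key] = value;
  }
  int Get(const std::string& key) const {
    auto it = fields_.find(key);
    // Mirrors GetObjectProperty: missing private field is an invalid access.
    if (it == fields_.end()) throw std::runtime_error("invalid access to " + key);
    return it->second;
  }
  void Set(const std::string& key, int value) {
    // Mirrors SetObjectProperty: writes never create a missing private field.
    if (!fields_.count(key)) throw std::runtime_error("invalid access to " + key);
    fields_[key] = value;
  }

 private:
  std::map<std::string, int> fields_;
};

int main() {
  PrivateFields obj;
  obj.Add("#x", 1);  // ok: first declaration
  obj.Set("#x", 2);  // ok: field exists
  try {
    obj.Get("#y");   // throws: never declared on this object
  } catch (const std::runtime_error&) {
  }
}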
diff --git a/deps/v8/src/runtime/runtime-operators.cc b/deps/v8/src/runtime/runtime-operators.cc
index 42a7e21b82..d01d115892 100644
--- a/deps/v8/src/runtime/runtime-operators.cc
+++ b/deps/v8/src/runtime/runtime-operators.cc
@@ -9,33 +9,6 @@
namespace v8 {
namespace internal {
-RUNTIME_FUNCTION(Runtime_Multiply) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- RETURN_RESULT_OR_FAILURE(isolate, Object::Multiply(isolate, lhs, rhs));
-}
-
-
-RUNTIME_FUNCTION(Runtime_Divide) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- RETURN_RESULT_OR_FAILURE(isolate, Object::Divide(isolate, lhs, rhs));
-}
-
-
-RUNTIME_FUNCTION(Runtime_Modulus) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- RETURN_RESULT_OR_FAILURE(isolate, Object::Modulus(isolate, lhs, rhs));
-}
-
-
RUNTIME_FUNCTION(Runtime_Add) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -45,69 +18,6 @@ RUNTIME_FUNCTION(Runtime_Add) {
}
-RUNTIME_FUNCTION(Runtime_Subtract) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- RETURN_RESULT_OR_FAILURE(isolate, Object::Subtract(isolate, lhs, rhs));
-}
-
-
-RUNTIME_FUNCTION(Runtime_ShiftLeft) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- RETURN_RESULT_OR_FAILURE(isolate, Object::ShiftLeft(isolate, lhs, rhs));
-}
-
-
-RUNTIME_FUNCTION(Runtime_ShiftRight) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- RETURN_RESULT_OR_FAILURE(isolate, Object::ShiftRight(isolate, lhs, rhs));
-}
-
-
-RUNTIME_FUNCTION(Runtime_ShiftRightLogical) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- RETURN_RESULT_OR_FAILURE(isolate,
- Object::ShiftRightLogical(isolate, lhs, rhs));
-}
-
-
-RUNTIME_FUNCTION(Runtime_BitwiseAnd) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- RETURN_RESULT_OR_FAILURE(isolate, Object::BitwiseAnd(isolate, lhs, rhs));
-}
-
-
-RUNTIME_FUNCTION(Runtime_BitwiseOr) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- RETURN_RESULT_OR_FAILURE(isolate, Object::BitwiseOr(isolate, lhs, rhs));
-}
-
-
-RUNTIME_FUNCTION(Runtime_BitwiseXor) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- RETURN_RESULT_OR_FAILURE(isolate, Object::BitwiseXor(isolate, lhs, rhs));
-}
-
RUNTIME_FUNCTION(Runtime_Equal) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -184,14 +94,5 @@ RUNTIME_FUNCTION(Runtime_GreaterThanOrEqual) {
return isolate->heap()->ToBoolean(result.FromJust());
}
-RUNTIME_FUNCTION(Runtime_InstanceOf) {
- HandleScope shs(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, callable, 1);
- RETURN_RESULT_OR_FAILURE(isolate,
- Object::InstanceOf(isolate, object, callable));
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-promise.cc b/deps/v8/src/runtime/runtime-promise.cc
index 2c28cd3c98..2d3a4fda50 100644
--- a/deps/v8/src/runtime/runtime-promise.cc
+++ b/deps/v8/src/runtime/runtime-promise.cc
@@ -1,8 +1,10 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+
#include "src/runtime/runtime-utils.h"
+#include "src/api.h"
#include "src/arguments.h"
#include "src/counters.h"
#include "src/debug/debug.h"
@@ -12,27 +14,6 @@
namespace v8 {
namespace internal {
-namespace {
-
-void PromiseRejectEvent(Isolate* isolate, Handle<JSPromise> promise,
- Handle<Object> rejected_promise, Handle<Object> value,
- bool debug_event) {
- isolate->RunPromiseHook(PromiseHookType::kResolve, promise,
- isolate->factory()->undefined_value());
-
- if (isolate->debug()->is_active() && debug_event) {
- isolate->debug()->OnPromiseReject(rejected_promise, value);
- }
-
- // Report only if we don't actually have a handler.
- if (!promise->has_handler()) {
- isolate->ReportPromiseReject(promise, value,
- v8::kPromiseRejectWithNoHandler);
- }
-}
-
-} // namespace
-
RUNTIME_FUNCTION(Runtime_PromiseRejectEventFromStack) {
DCHECK_EQ(2, args.length());
HandleScope scope(isolate);
@@ -41,21 +22,19 @@ RUNTIME_FUNCTION(Runtime_PromiseRejectEventFromStack) {
Handle<Object> rejected_promise = promise;
if (isolate->debug()->is_active()) {
- // If the Promise.reject call is caught, then this will return
- // undefined, which will be interpreted by PromiseRejectEvent
- // as being a caught exception event.
+ // If the Promise.reject() call is caught, then this will return
+ // undefined, which we interpret as being a caught exception event.
rejected_promise = isolate->GetPromiseOnStackOnThrow();
}
- PromiseRejectEvent(isolate, promise, rejected_promise, value, true);
- return isolate->heap()->undefined_value();
-}
+ isolate->RunPromiseHook(PromiseHookType::kResolve, promise,
+ isolate->factory()->undefined_value());
+ isolate->debug()->OnPromiseReject(rejected_promise, value);
-RUNTIME_FUNCTION(Runtime_ReportPromiseReject) {
- DCHECK_EQ(2, args.length());
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
- isolate->ReportPromiseReject(promise, value, v8::kPromiseRejectWithNoHandler);
+ // Report only if we don't actually have a handler.
+ if (!promise->has_handler()) {
+ isolate->ReportPromiseReject(promise, value,
+ v8::kPromiseRejectWithNoHandler);
+ }
return isolate->heap()->undefined_value();
}
@@ -73,7 +52,9 @@ RUNTIME_FUNCTION(Runtime_PromiseRevokeReject) {
RUNTIME_FUNCTION(Runtime_EnqueueMicrotask) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, microtask, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ Handle<CallableTask> microtask =
+ isolate->factory()->NewCallableTask(function, isolate->native_context());
isolate->EnqueueMicrotask(microtask);
return isolate->heap()->undefined_value();
}
@@ -85,6 +66,17 @@ RUNTIME_FUNCTION(Runtime_RunMicrotasks) {
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_RunMicrotaskCallback) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_CHECKED(Object, microtask_callback, 0);
+ CONVERT_ARG_CHECKED(Object, microtask_data, 1);
+ MicrotaskCallback callback = ToCData<MicrotaskCallback>(microtask_callback);
+ void* data = ToCData<void*>(microtask_data);
+ callback(data);
+ return isolate->heap()->undefined_value();
+}
+
RUNTIME_FUNCTION(Runtime_PromiseStatus) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -118,23 +110,17 @@ RUNTIME_FUNCTION(Runtime_PromiseHookInit) {
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(Runtime_PromiseHookResolve) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
- isolate->RunPromiseHook(PromiseHookType::kResolve, promise,
- isolate->factory()->undefined_value());
- return isolate->heap()->undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_PromiseHookBefore) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
- if (promise->IsJSPromise()) {
- isolate->RunPromiseHook(PromiseHookType::kBefore,
- Handle<JSPromise>::cast(promise),
- isolate->factory()->undefined_value());
+ CONVERT_ARG_HANDLE_CHECKED(HeapObject, payload, 0);
+ Handle<JSPromise> promise;
+ if (JSPromise::From(payload).ToHandle(&promise)) {
+ if (isolate->debug()->is_active()) isolate->PushPromise(promise);
+ if (promise->IsJSPromise()) {
+ isolate->RunPromiseHook(PromiseHookType::kBefore, promise,
+ isolate->factory()->undefined_value());
+ }
}
return isolate->heap()->undefined_value();
}
@@ -142,14 +128,37 @@ RUNTIME_FUNCTION(Runtime_PromiseHookBefore) {
RUNTIME_FUNCTION(Runtime_PromiseHookAfter) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
- if (promise->IsJSPromise()) {
- isolate->RunPromiseHook(PromiseHookType::kAfter,
- Handle<JSPromise>::cast(promise),
- isolate->factory()->undefined_value());
+ CONVERT_ARG_HANDLE_CHECKED(HeapObject, payload, 0);
+ Handle<JSPromise> promise;
+ if (JSPromise::From(payload).ToHandle(&promise)) {
+ if (isolate->debug()->is_active()) isolate->PopPromise();
+ if (promise->IsJSPromise()) {
+ isolate->RunPromiseHook(PromiseHookType::kAfter, promise,
+ isolate->factory()->undefined_value());
+ }
}
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_RejectPromise) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, reason, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Oddball, debug_event, 2);
+ return *JSPromise::Reject(promise, reason, debug_event->BooleanValue());
+}
+
+RUNTIME_FUNCTION(Runtime_ResolvePromise) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, resolution, 1);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSPromise::Resolve(promise, resolution));
+ return *result;
+}
+
} // namespace internal
} // namespace v8
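
In the runtime-promise.cc hunks above, Runtime_EnqueueMicrotask now wraps the JS function in a CallableTask before enqueuing it, and the new Runtime_RunMicrotaskCallback invokes a raw C callback with its data pointer. A standalone sketch of a microtask queue that accepts both shapes (illustrative C++ only, not V8 code; the queue type and callback are made up):

#include <cstdio>
#include <functional>
#include <queue>

using MicrotaskCallback = void (*)(void* data);

struct MicrotaskQueue {
  std::queue<std::function<void()>> tasks;

  // An engine-level callable task, analogous to CallableTask.
  void EnqueueCallable(std::function<void()> fn) { tasks.push(std::move(fn)); }

  // A raw (callback, data) pair, analogous to RunMicrotaskCallback.
  void EnqueueCallback(MicrotaskCallback cb, void* data) {
    tasks.push([cb, data] { cb(data); });
  }

  void RunAll() {
    while (!tasks.empty()) {
      tasks.front()();
      tasks.pop();
    }
  }
};

static void PrintInt(void* data) { std::printf("%d\n", *static_cast<int*>(data)); }

int main() {
  MicrotaskQueue queue;
  int value = 7;
  queue.EnqueueCallable([] { std::puts("callable task"); });
  queue.EnqueueCallback(PrintInt, &value);
  queue.RunAll();  // prints "callable task" then "7"
}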
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index d0afcd2636..920f37cf98 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -1920,14 +1920,6 @@ RUNTIME_FUNCTION(Runtime_RegExpReplace) {
RETURN_RESULT_OR_FAILURE(isolate, builder.Finish());
}
-RUNTIME_FUNCTION(Runtime_RegExpExecReThrow) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(0, args.length());
- Object* exception = isolate->pending_exception();
- isolate->clear_pending_exception();
- return isolate->ReThrow(exception);
-}
-
RUNTIME_FUNCTION(Runtime_RegExpInitializeAndCompile) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index 76f291f90f..3d2d7940a4 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -123,7 +123,7 @@ Object* DeclareGlobal(
// named interceptor or the interceptor is not masking.
if (!global->HasNamedInterceptor() ||
global->GetNamedInterceptor()->non_masking()) {
- LoadGlobalICNexus nexus(feedback_vector, slot);
+ FeedbackNexus nexus(feedback_vector, slot);
nexus.ConfigurePropertyCellMode(it.GetPropertyCell());
}
}
@@ -141,7 +141,8 @@ Object* DeclareGlobals(Isolate* isolate, Handle<FixedArray> declarations,
FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < length, i += 4, {
Handle<String> name(String::cast(declarations->get(i)), isolate);
FeedbackSlot slot(Smi::ToInt(declarations->get(i + 1)));
- Handle<Object> possibly_literal_slot(declarations->get(i + 2), isolate);
+ Handle<Object> possibly_feedback_cell_slot(declarations->get(i + 2),
+ isolate);
Handle<Object> initial_value(declarations->get(i + 3), isolate);
bool is_var = initial_value->IsUndefined(isolate);
@@ -150,16 +151,18 @@ Object* DeclareGlobals(Isolate* isolate, Handle<FixedArray> declarations,
Handle<Object> value;
if (is_function) {
- DCHECK(possibly_literal_slot->IsSmi());
+ DCHECK(possibly_feedback_cell_slot->IsSmi());
// Copy the function and update its context. Use it as value.
Handle<SharedFunctionInfo> shared =
Handle<SharedFunctionInfo>::cast(initial_value);
- FeedbackSlot literals_slot(Smi::ToInt(*possibly_literal_slot));
- Handle<Cell> literals(Cell::cast(feedback_vector->Get(literals_slot)),
- isolate);
+ FeedbackSlot feedback_cells_slot(
+ Smi::ToInt(*possibly_feedback_cell_slot));
+ Handle<FeedbackCell> feedback_cell(
+ FeedbackCell::cast(feedback_vector->Get(feedback_cells_slot)),
+ isolate);
Handle<JSFunction> function =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
- shared, context, literals, TENURED);
+ shared, context, feedback_cell, TENURED);
value = function;
} else {
value = isolate->factory()->undefined_value();
@@ -635,34 +638,27 @@ RUNTIME_FUNCTION(Runtime_NewArgumentsElements) {
RUNTIME_FUNCTION(Runtime_NewClosure) {
HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
- CONVERT_ARG_HANDLE_CHECKED(FeedbackVector, vector, 1);
- CONVERT_SMI_ARG_CHECKED(index, 2);
+ CONVERT_ARG_HANDLE_CHECKED(FeedbackCell, feedback_cell, 1);
Handle<Context> context(isolate->context(), isolate);
- FeedbackSlot slot = FeedbackVector::ToSlot(index);
- Handle<Cell> vector_cell(Cell::cast(vector->Get(slot)), isolate);
Handle<JSFunction> function =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
- shared, context, vector_cell, NOT_TENURED);
+ shared, context, feedback_cell, NOT_TENURED);
return *function;
}
-
RUNTIME_FUNCTION(Runtime_NewClosure_Tenured) {
HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
- CONVERT_ARG_HANDLE_CHECKED(FeedbackVector, vector, 1);
- CONVERT_SMI_ARG_CHECKED(index, 2);
+ CONVERT_ARG_HANDLE_CHECKED(FeedbackCell, feedback_cell, 1);
Handle<Context> context(isolate->context(), isolate);
- FeedbackSlot slot = FeedbackVector::ToSlot(index);
- Handle<Cell> vector_cell(Cell::cast(vector->Get(slot)), isolate);
// The caller ensures that we pretenure closures that are assigned
// directly to properties.
Handle<JSFunction> function =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
- shared, context, vector_cell, TENURED);
+ shared, context, feedback_cell, TENURED);
return *function;
}
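
The NewClosure/NewClosure_Tenured hunks above shrink the signature from (shared, vector, slot index) to (shared, feedback cell): the caller resolves the cell once and passes it directly. A minimal standalone sketch of that signature change (illustrative C++ only, not V8 code; the struct names are made up):

#include <iostream>
#include <vector>

struct FeedbackCell { int data = 0; };

struct FeedbackVector {
  std::vector<FeedbackCell> cells;
  FeedbackCell* Get(int slot) { return &cells[slot]; }
};

struct Closure { FeedbackCell* feedback_cell; };

// New shape: the caller has already resolved the cell.
Closure NewClosure(FeedbackCell* cell) { return Closure{cell}; }

int main() {
  FeedbackVector feedback_vector{{FeedbackCell{1}, FeedbackCell{2}}};
  // The old shape would have been NewClosure(&feedback_vector, /*slot=*/1).
  Closure closure = NewClosure(feedback_vector.Get(1));
  std::cout << closure.feedback_cell->data << '\n';  // prints 2
}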
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index 8f6b887f62..6f203b3d01 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -216,35 +216,16 @@ RUNTIME_FUNCTION(Runtime_StringLastIndexOf) {
isolate->factory()->undefined_value());
}
-RUNTIME_FUNCTION(Runtime_SubString) {
+RUNTIME_FUNCTION(Runtime_StringSubstring) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
-
CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
- int start, end;
- // We have a fast integer-only case here to avoid a conversion to double in
- // the common case where from and to are Smis.
- if (args[1]->IsSmi() && args[2]->IsSmi()) {
- CONVERT_SMI_ARG_CHECKED(from_number, 1);
- CONVERT_SMI_ARG_CHECKED(to_number, 2);
- start = from_number;
- end = to_number;
- } else if (args[1]->IsNumber() && args[2]->IsNumber()) {
- CONVERT_DOUBLE_ARG_CHECKED(from_number, 1);
- CONVERT_DOUBLE_ARG_CHECKED(to_number, 2);
- start = FastD2IChecked(from_number);
- end = FastD2IChecked(to_number);
- } else {
- return isolate->ThrowIllegalOperation();
- }
- // The following condition is intentionally robust because the SubString
- // builtin delegates here and we test this in
- // cctest/test-strings/RobustSubStringStub.
- if (end < start || start < 0 || end > string->length()) {
- return isolate->ThrowIllegalOperation();
- }
+ CONVERT_INT32_ARG_CHECKED(start, 1);
+ CONVERT_INT32_ARG_CHECKED(end, 2);
+ DCHECK_LE(0, start);
+ DCHECK_LE(start, end);
+ DCHECK_LE(end, string->length());
isolate->counters()->sub_string_runtime()->Increment();
-
return *isolate->factory()->NewSubString(string, start, end);
}
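
The StringSubstring hunk above turns the defensive range checks into DCHECKs, so the runtime now assumes 0 <= start <= end <= length and the caller (presumably the substring builtins) must validate or clamp the indices first. A standalone sketch of that split of responsibilities (illustrative C++ only, not V8 code; the helper names are made up):

#include <algorithm>
#include <cassert>
#include <iostream>
#include <string>

// "Runtime" side: assumes already-validated indices, like the DCHECKs above.
std::string Substring(const std::string& s, int start, int end) {
  assert(0 <= start && start <= end && end <= static_cast<int>(s.size()));
  return s.substr(start, end - start);
}

// "Builtin" side: clamps user-supplied indices before delegating.
std::string SafeSubstring(const std::string& s, int start, int end) {
  int length = static_cast<int>(s.size());
  start = std::clamp(start, 0, length);
  end = std::clamp(end, start, length);
  return Substring(s, start, end);
}

int main() {
  std::cout << SafeSubstring("hello", -3, 99) << '\n';  // prints "hello"
  std::cout << SafeSubstring("hello", 1, 3) << '\n';    // prints "el"
}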
diff --git a/deps/v8/src/runtime/runtime-symbol.cc b/deps/v8/src/runtime/runtime-symbol.cc
index 2eaef63bbf..488aa756c6 100644
--- a/deps/v8/src/runtime/runtime-symbol.cc
+++ b/deps/v8/src/runtime/runtime-symbol.cc
@@ -12,28 +12,25 @@
namespace v8 {
namespace internal {
-RUNTIME_FUNCTION(Runtime_CreateSymbol) {
+RUNTIME_FUNCTION(Runtime_CreatePrivateSymbol) {
HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
- CHECK(name->IsString() || name->IsUndefined(isolate));
- Handle<Symbol> symbol = isolate->factory()->NewSymbol();
- if (name->IsString()) symbol->set_name(*name);
+ DCHECK_GE(1, args.length());
+ Handle<Symbol> symbol = isolate->factory()->NewPrivateSymbol();
+ if (args.length() == 1) {
+ CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
+ CHECK(name->IsString() || name->IsUndefined(isolate));
+ if (name->IsString()) symbol->set_name(*name);
+ }
return *symbol;
}
-
-RUNTIME_FUNCTION(Runtime_CreatePrivateSymbol) {
+RUNTIME_FUNCTION(Runtime_CreatePrivateFieldSymbol) {
HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
- CHECK(name->IsString() || name->IsUndefined(isolate));
- Handle<Symbol> symbol = isolate->factory()->NewPrivateSymbol();
- if (name->IsString()) symbol->set_name(*name);
+ DCHECK_EQ(0, args.length());
+ Handle<Symbol> symbol = isolate->factory()->NewPrivateFieldSymbol();
return *symbol;
}
-
RUNTIME_FUNCTION(Runtime_SymbolDescription) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 01e2b198a6..6b2f3467fc 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -175,22 +175,6 @@ RUNTIME_FUNCTION(Runtime_IsConcurrentRecompilationSupported) {
isolate->concurrent_recompilation_enabled());
}
-RUNTIME_FUNCTION(Runtime_TypeProfile) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- if (function->has_feedback_vector()) {
- FeedbackVector* vector = function->feedback_vector();
- if (vector->metadata()->HasTypeProfileSlot()) {
- FeedbackSlot slot = vector->GetTypeProfileSlot();
- CollectTypeProfileNexus nexus(vector, slot);
- return nexus.GetTypeProfile();
- }
- }
- return *isolate->factory()->NewJSObject(isolate->object_function());
-}
-
RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
HandleScope scope(isolate);
@@ -252,8 +236,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
: "non-concurrent");
}
- // TODO(mvstanton): pass pretenure flag to EnsureLiterals.
- JSFunction::EnsureLiterals(function);
+ JSFunction::EnsureFeedbackVector(function);
function->MarkForOptimization(concurrency_mode);
@@ -470,121 +453,6 @@ RUNTIME_FUNCTION(Runtime_ClearFunctionFeedback) {
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(Runtime_CheckWasmWrapperElision) {
- // This only supports the case where the function being exported
- // calls an intermediate function, and the intermediate function
- // calls exactly one imported function
- HandleScope scope(isolate);
- CHECK_EQ(args.length(), 2);
- // It takes two parameters, the first one is the JSFunction,
- // The second one is the type
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- // If type is 0, it means that it is supposed to be a direct call into a wasm
- // function.
- // If type is 1, it means that it is supposed to have wrappers.
- CONVERT_ARG_HANDLE_CHECKED(Smi, type, 1);
- Handle<Code> export_code = handle(function->code());
- CHECK(export_code->kind() == Code::JS_TO_WASM_FUNCTION);
- int const mask =
- RelocInfo::ModeMask(FLAG_wasm_jit_to_native ? RelocInfo::JS_TO_WASM_CALL
- : RelocInfo::CODE_TARGET);
- // check the type of the $export_fct
- wasm::WasmCode* export_fct = nullptr;
- Handle<Code> export_fct_handle;
- wasm::WasmCode* intermediate_fct = nullptr;
- Handle<Code> intermediate_fct_handle;
-
- int count = 0;
- for (RelocIterator it(*export_code, mask); !it.done(); it.next()) {
- RelocInfo* rinfo = it.rinfo();
- Address target_address = FLAG_wasm_jit_to_native
- ? rinfo->js_to_wasm_address()
- : rinfo->target_address();
- if (FLAG_wasm_jit_to_native) {
- wasm::WasmCode* target =
- isolate->wasm_engine()->code_manager()->LookupCode(target_address);
- if (target->kind() == wasm::WasmCode::kFunction) {
- ++count;
- export_fct = target;
- }
- } else {
- Code* target = Code::GetCodeFromTargetAddress(target_address);
- if (target->kind() == Code::WASM_FUNCTION) {
- ++count;
- export_fct_handle = handle(target);
- }
- }
- }
- CHECK_EQ(count, 1);
- // check the type of the intermediate_fct
- count = 0;
- if (FLAG_wasm_jit_to_native) {
- for (RelocIterator it(export_fct->instructions(), export_fct->reloc_info(),
- export_fct->constant_pool(),
- RelocInfo::ModeMask(RelocInfo::WASM_CALL));
- !it.done(); it.next()) {
- RelocInfo* rinfo = it.rinfo();
- Address target_address = rinfo->target_address();
- wasm::WasmCode* target =
- isolate->wasm_engine()->code_manager()->LookupCode(target_address);
- if (target->kind() == wasm::WasmCode::kFunction) {
- ++count;
- intermediate_fct = target;
- }
- }
- } else {
- count = 0;
- for (RelocIterator it(*export_fct_handle, mask); !it.done(); it.next()) {
- RelocInfo* rinfo = it.rinfo();
- Address target_address = rinfo->target_address();
- Code* target = Code::GetCodeFromTargetAddress(target_address);
- if (target->kind() == Code::WASM_FUNCTION) {
- ++count;
- intermediate_fct_handle = handle(target);
- }
- }
- }
- CHECK_EQ(count, 1);
- // Check the type of the imported exported function, it should be also a wasm
- // function in our case.
- CHECK(type->value() == 0 || type->value() == 1);
-
- count = 0;
- if (FLAG_wasm_jit_to_native) {
- wasm::WasmCode::Kind target_kind = type->value() == 0
- ? wasm::WasmCode::kWasmToWasmWrapper
- : wasm::WasmCode::kWasmToJsWrapper;
- for (RelocIterator it(intermediate_fct->instructions(),
- intermediate_fct->reloc_info(),
- intermediate_fct->constant_pool(),
- RelocInfo::ModeMask(RelocInfo::WASM_CALL));
- !it.done(); it.next()) {
- RelocInfo* rinfo = it.rinfo();
- Address target_address = rinfo->target_address();
- wasm::WasmCode* target =
- isolate->wasm_engine()->code_manager()->LookupCode(target_address);
- if (target->kind() == target_kind) {
- ++count;
- }
- }
- } else {
- Code::Kind target_kind = type->value() == 0 ? Code::WASM_TO_WASM_FUNCTION
- : Code::WASM_TO_JS_FUNCTION;
- count = 0;
- for (RelocIterator it(*intermediate_fct_handle, mask); !it.done();
- it.next()) {
- RelocInfo* rinfo = it.rinfo();
- Address target_address = rinfo->target_address();
- Code* target = Code::GetCodeFromTargetAddress(target_address);
- if (target->kind() == target_kind) {
- ++count;
- }
- }
- }
- CHECK_LE(count, 1);
- return isolate->heap()->ToBoolean(count == 1);
-}
-
RUNTIME_FUNCTION(Runtime_SetWasmCompileControls) {
HandleScope scope(isolate);
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
@@ -757,6 +625,18 @@ RUNTIME_FUNCTION(Runtime_SetFlags) {
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_SetForceSlowPath) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(Object, arg, 0);
+ if (arg->IsTrue(isolate)) {
+ isolate->set_force_slow_path(true);
+ } else {
+ DCHECK(arg->IsFalse(isolate));
+ isolate->set_force_slow_path(false);
+ }
+ return isolate->heap()->undefined_value();
+}
RUNTIME_FUNCTION(Runtime_Abort) {
SealHandleScope shs(isolate);
@@ -774,6 +654,10 @@ RUNTIME_FUNCTION(Runtime_AbortJS) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, message, 0);
+ if (FLAG_disable_abortjs) {
+ base::OS::PrintError("[disabled] abort: %s\n", message->ToCString().get());
+ return nullptr;
+ }
base::OS::PrintError("abort: %s\n", message->ToCString().get());
isolate->PrintStack(stderr);
base::OS::Abort();
@@ -845,31 +729,6 @@ RUNTIME_FUNCTION(Runtime_TraceExit) {
return obj; // return TOS
}
-RUNTIME_FUNCTION(Runtime_GetExceptionDetails) {
- HandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, exception_obj, 0);
-
- Factory* factory = isolate->factory();
- Handle<JSMessageObject> message_obj =
- isolate->CreateMessage(exception_obj, nullptr);
-
- Handle<JSObject> message = factory->NewJSObject(isolate->object_function());
-
- Handle<String> key;
- Handle<Object> value;
-
- key = factory->NewStringFromAsciiChecked("start_pos");
- value = handle(Smi::FromInt(message_obj->start_position()), isolate);
- JSObject::SetProperty(message, key, value, LanguageMode::kStrict).Assert();
-
- key = factory->NewStringFromAsciiChecked("end_pos");
- value = handle(Smi::FromInt(message_obj->end_position()), isolate);
- JSObject::SetProperty(message, key, value, LanguageMode::kStrict).Assert();
-
- return *message;
-}
-
RUNTIME_FUNCTION(Runtime_HaveSameMap) {
SealHandleScope shs(isolate);
DCHECK_EQ(2, args.length());
@@ -964,7 +823,6 @@ ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DoubleElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(HoleyElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DictionaryElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(SloppyArgumentsElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FixedTypedArrayElements)
// Properties test sitting with elements tests - not fooling anyone.
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastProperties)
@@ -985,7 +843,7 @@ TYPED_ARRAYS(FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION)
RUNTIME_FUNCTION(Runtime_SpeciesProtector) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
- return isolate->heap()->ToBoolean(isolate->IsArraySpeciesLookupChainIntact());
+ return isolate->heap()->ToBoolean(isolate->IsSpeciesLookupChainIntact());
}
// Take a compiled wasm module, serialize it and copy the buffer into an array
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index 85fb2d2173..f8fd3cc622 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -14,14 +14,6 @@
namespace v8 {
namespace internal {
-RUNTIME_FUNCTION(Runtime_ArrayBufferGetByteLength) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(JSArrayBuffer, holder, 0);
- return holder->byte_length();
-}
-
-
RUNTIME_FUNCTION(Runtime_ArrayBufferNeuter) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -56,7 +48,7 @@ RUNTIME_FUNCTION(Runtime_TypedArrayCopyElements) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, target, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, source, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, source, 1);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(length_obj, 2);
size_t length;
@@ -66,19 +58,12 @@ RUNTIME_FUNCTION(Runtime_TypedArrayCopyElements) {
return accessor->CopyElements(source, target, length);
}
-#define BUFFER_VIEW_GETTER(Type, getter, accessor) \
- RUNTIME_FUNCTION(Runtime_##Type##Get##getter) { \
- HandleScope scope(isolate); \
- DCHECK_EQ(1, args.length()); \
- CONVERT_ARG_HANDLE_CHECKED(JS##Type, holder, 0); \
- return holder->accessor(); \
- }
-
-BUFFER_VIEW_GETTER(ArrayBufferView, ByteLength, byte_length)
-BUFFER_VIEW_GETTER(ArrayBufferView, ByteOffset, byte_offset)
-BUFFER_VIEW_GETTER(TypedArray, Length, length)
-
-#undef BUFFER_VIEW_GETTER
+RUNTIME_FUNCTION(Runtime_TypedArrayGetLength) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0);
+ return holder->length();
+}
RUNTIME_FUNCTION(Runtime_ArrayBufferViewWasNeutered) {
HandleScope scope(isolate);
@@ -162,58 +147,6 @@ RUNTIME_FUNCTION(Runtime_IsTypedArray) {
return isolate->heap()->ToBoolean(args[0]->IsJSTypedArray());
}
-RUNTIME_FUNCTION(Runtime_IsSharedTypedArray) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- return isolate->heap()->ToBoolean(
- args[0]->IsJSTypedArray() &&
- JSTypedArray::cast(args[0])->GetBuffer()->is_shared());
-}
-
-
-RUNTIME_FUNCTION(Runtime_IsSharedIntegerTypedArray) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- if (!args[0]->IsJSTypedArray()) {
- return isolate->heap()->false_value();
- }
-
- Handle<JSTypedArray> obj(JSTypedArray::cast(args[0]));
- return isolate->heap()->ToBoolean(obj->GetBuffer()->is_shared() &&
- obj->type() != kExternalFloat32Array &&
- obj->type() != kExternalFloat64Array &&
- obj->type() != kExternalUint8ClampedArray);
-}
-
-
-RUNTIME_FUNCTION(Runtime_IsSharedInteger32TypedArray) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- if (!args[0]->IsJSTypedArray()) {
- return isolate->heap()->false_value();
- }
-
- Handle<JSTypedArray> obj(JSTypedArray::cast(args[0]));
- return isolate->heap()->ToBoolean(obj->GetBuffer()->is_shared() &&
- obj->type() == kExternalInt32Array);
-}
-
-RUNTIME_FUNCTION(Runtime_TypedArraySpeciesCreateByLength) {
- HandleScope scope(isolate);
- DCHECK_EQ(args.length(), 2);
- Handle<JSTypedArray> exemplar = args.at<JSTypedArray>(0);
- Handle<Object> length = args.at(1);
- int argc = 1;
- ScopedVector<Handle<Object>> argv(argc);
- argv[0] = length;
- Handle<JSTypedArray> result_array;
- // TODO(tebbi): Pass correct method name.
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result_array,
- JSTypedArray::SpeciesCreate(isolate, exemplar, argc, argv.start(), ""));
- return *result_array;
-}
-
// 22.2.3.23 %TypedArray%.prototype.set ( overloaded [ , offset ] )
RUNTIME_FUNCTION(Runtime_TypedArraySet) {
HandleScope scope(isolate);
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index d05f4984c6..2bfd280803 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -44,7 +44,6 @@ namespace internal {
F(GetArrayKeys, 2, 1) \
F(TrySliceSimpleNonFastElements, 3, 1) \
F(NewArray, -1 /* >= 3 */, 1) \
- F(FunctionBind, -1, 1) \
F(NormalizeElements, 1, 1) \
F(GrowArrayElements, 2, 1) \
F(HasComplexElements, 1, 1) \
@@ -56,9 +55,6 @@ namespace internal {
F(SpreadIterablePrepare, 1, 1)
#define FOR_EACH_INTRINSIC_ATOMICS(F) \
- F(ThrowNotIntegerSharedTypedArrayError, 1, 1) \
- F(ThrowNotInt32SharedTypedArrayError, 1, 1) \
- F(ThrowInvalidAtomicAccessIndexError, 0, 1) \
F(AtomicsExchange, 3, 1) \
F(AtomicsCompareExchange, 4, 1) \
F(AtomicsAdd, 3, 1) \
@@ -78,7 +74,8 @@ namespace internal {
F(BigIntEqualToString, 2, 1) \
F(BigIntToBoolean, 1, 1) \
F(BigIntToNumber, 1, 1) \
- F(BigIntUnaryOp, 2, 1)
+ F(BigIntUnaryOp, 2, 1) \
+ F(ToBigInt, 1, 1)
#define FOR_EACH_INTRINSIC_CLASSES(F) \
F(ThrowUnsupportedSuperError, 0, 1) \
@@ -99,8 +96,6 @@ namespace internal {
#define FOR_EACH_INTRINSIC_COLLECTIONS(F) \
F(TheHole, 0, 1) \
- F(GenericHash, 1, 1) \
- F(GetExistingHash, 1, 1) \
F(SetGrow, 1, 1) \
F(SetShrink, 1, 1) \
F(SetIteratorClone, 1, 1) \
@@ -108,7 +103,6 @@ namespace internal {
F(MapGrow, 1, 1) \
F(MapIteratorClone, 1, 1) \
F(GetWeakMapEntries, 2, 1) \
- F(WeakCollectionInitialize, 1, 1) \
F(WeakCollectionDelete, 3, 1) \
F(WeakCollectionSet, 4, 1) \
F(GetWeakSetValues, 2, 1) \
@@ -130,12 +124,10 @@ namespace internal {
#define FOR_EACH_INTRINSIC_DATE(F) \
F(IsDate, 1, 1) \
- F(DateCurrentTime, 0, 1) \
- F(ThrowNotDateError, 0, 1)
+ F(DateCurrentTime, 0, 1)
#define FOR_EACH_INTRINSIC_DEBUG(F) \
F(HandleDebuggerStatement, 0, 1) \
- F(SetDebugEventListener, 2, 1) \
F(ScheduleBreak, 0, 1) \
F(DebugGetInternalProperties, 1, 1) \
F(DebugGetPropertyDetails, 2, 1) \
@@ -153,12 +145,7 @@ namespace internal {
F(GetGeneratorScopeCount, 1, 1) \
F(GetGeneratorScopeDetails, 2, 1) \
F(SetScopeVariableValue, 6, 1) \
- F(DebugPrintScopes, 0, 1) \
- F(SetBreakPointsActive, 1, 1) \
F(GetBreakLocations, 1, 1) \
- F(SetFunctionBreakPoint, 3, 1) \
- F(SetScriptBreakPoint, 3, 1) \
- F(ClearBreakPoint, 1, 1) \
F(ChangeBreakOnException, 2, 1) \
F(IsBreakOnException, 1, 1) \
F(PrepareStep, 2, 1) \
@@ -177,22 +164,16 @@ namespace internal {
F(GetHeapUsage, 0, 1) \
F(GetScript, 1, 1) \
F(ScriptLineCount, 1, 1) \
- F(ScriptLineStartPosition, 2, 1) \
- F(ScriptLineEndPosition, 2, 1) \
F(ScriptLocationFromLine, 4, 1) \
F(ScriptLocationFromLine2, 4, 1) \
F(ScriptPositionInfo, 3, 1) \
F(ScriptPositionInfo2, 3, 1) \
- F(ScriptSourceLine, 2, 1) \
F(DebugOnFunctionCall, 1, 1) \
F(DebugPrepareStepInSuspendedGenerator, 0, 1) \
- F(DebugRecordGenerator, 1, 1) \
F(DebugPushPromise, 1, 1) \
F(DebugPopPromise, 0, 1) \
- F(DebugPromiseReject, 2, 1) \
F(DebugAsyncFunctionPromiseCreated, 1, 1) \
F(DebugIsActive, 0, 1) \
- F(DebugBreakInOptimizedCode, 0, 1) \
F(DebugCollectCoverage, 0, 1) \
F(DebugTogglePreciseCoverage, 1, 1) \
F(DebugToggleBlockCoverage, 1, 1) \
@@ -222,8 +203,7 @@ namespace internal {
#define FOR_EACH_INTRINSIC_INTERPRETER(F) \
FOR_EACH_INTRINSIC_INTERPRETER_TRACE(F) \
FOR_EACH_INTRINSIC_INTERPRETER_TRACE_FEEDBACK(F) \
- F(InterpreterDeserializeLazy, 2, 1) \
- F(InterpreterNewClosure, 4, 1)
+ F(InterpreterDeserializeLazy, 2, 1)
#define FOR_EACH_INTRINSIC_FUNCTION(F) \
F(FunctionGetName, 1, 1) \
@@ -232,7 +212,6 @@ namespace internal {
F(FunctionGetSourceCode, 1, 1) \
F(FunctionGetScriptSourcePosition, 1, 1) \
F(FunctionGetContextData, 1, 1) \
- F(FunctionSetLength, 2, 1) \
F(FunctionIsAPIFunction, 1, 1) \
F(SetCode, 2, 1) \
F(SetNativeFlag, 1, 1) \
@@ -246,11 +225,14 @@ namespace internal {
F(GeneratorClose, 1, 1) \
F(GeneratorGetFunction, 1, 1) \
F(GeneratorGetReceiver, 1, 1) \
- F(GeneratorGetContext, 1, 1) \
F(GeneratorGetInputOrDebugPos, 1, 1) \
+ F(AsyncFunctionAwaitCaught, 3, 1) \
+ F(AsyncFunctionAwaitUncaught, 3, 1) \
F(AsyncGeneratorResolve, 3, 1) \
F(AsyncGeneratorReject, 2, 1) \
F(AsyncGeneratorYield, 3, 1) \
+ F(AsyncGeneratorAwaitCaught, 2, 1) \
+ F(AsyncGeneratorAwaitUncaught, 2, 1) \
F(GeneratorGetContinuation, 1, 1) \
F(GeneratorGetSourcePosition, 1, 1) \
F(GeneratorGetResumeMode, 1, 1) \
@@ -311,35 +293,28 @@ namespace internal {
F(PromoteScheduledException, 0, 1) \
F(ReThrow, 1, 1) \
F(RunMicrotasks, 0, 1) \
+ F(RunMicrotaskCallback, 2, 1) \
F(StackGuard, 0, 1) \
F(Throw, 1, 1) \
F(ThrowApplyNonFunction, 1, 1) \
- F(ThrowCannotConvertToPrimitive, 0, 1) \
F(ThrowCalledNonCallable, 1, 1) \
- F(ThrowCalledOnNullOrUndefined, 1, 1) \
F(ThrowConstructedNonConstructable, 1, 1) \
F(ThrowConstructorReturnedNonObject, 0, 1) \
- F(ThrowGeneratorRunning, 0, 1) \
- F(ThrowIncompatibleMethodReceiver, 2, 1) \
- F(ThrowInvalidHint, 1, 1) \
F(ThrowInvalidStringLength, 0, 1) \
F(ThrowInvalidTypedArrayAlignment, 2, 1) \
F(ThrowIteratorResultNotAnObject, 1, 1) \
F(ThrowThrowMethodMissing, 0, 1) \
F(ThrowSymbolIteratorInvalid, 0, 1) \
- F(ThrowNonCallableInInstanceOfCheck, 0, 1) \
- F(ThrowNonObjectInInstanceOfCheck, 0, 1) \
F(ThrowNotConstructor, 1, 1) \
F(ThrowRangeError, -1 /* >= 1 */, 1) \
F(ThrowReferenceError, 1, 1) \
F(ThrowStackOverflow, 0, 1) \
F(ThrowSymbolAsyncIteratorInvalid, 0, 1) \
F(ThrowTypeError, -1 /* >= 1 */, 1) \
- F(ThrowUndefinedOrNullToObject, 1, 1) \
F(Typeof, 1, 1) \
F(UnwindAndFindExceptionHandler, 0, 1) \
F(AllowDynamicFunction, 1, 1) \
- F(GetTemplateObject, 1, 1) \
+ F(CreateTemplateObject, 1, 1) \
F(ReportMessage, 1, 1)
#define FOR_EACH_INTRINSIC_LITERALS(F) \
@@ -366,9 +341,7 @@ namespace internal {
#define FOR_EACH_INTRINSIC_MODULE(F) \
F(DynamicImportCall, 2, 1) \
F(GetImportMetaObject, 0, 1) \
- F(GetModuleNamespace, 1, 1) \
- F(LoadModuleVariable, 1, 1) \
- F(StoreModuleVariable, 2, 1)
+ F(GetModuleNamespace, 1, 1)
#define FOR_EACH_INTRINSIC_NUMBERS(F) \
F(IsValidSmi, 1, 1) \
@@ -391,6 +364,10 @@ namespace internal {
F(ObjectCreate, 2, 1) \
F(InternalSetPrototype, 2, 1) \
F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
+ F(ObjectValues, 1, 1) \
+ F(ObjectValuesSkipFastPath, 1, 1) \
+ F(ObjectEntries, 1, 1) \
+ F(ObjectEntriesSkipFastPath, 1, 1) \
F(GetProperty, 2, 1) \
F(KeyedGetProperty, 2, 1) \
F(AddNamedProperty, 4, 1) \
@@ -406,14 +383,10 @@ namespace internal {
F(AllocateHeapNumber, 0, 1) \
F(NewObject, 2, 1) \
F(CompleteInobjectSlackTrackingForMap, 1, 1) \
- F(LoadMutableDouble, 2, 1) \
F(TryMigrateInstance, 1, 1) \
- F(IsJSGlobalProxy, 1, 1) \
F(DefineAccessorPropertyUnchecked, 5, 1) \
F(DefineDataPropertyInLiteral, 6, 1) \
F(CollectTypeProfile, 3, 1) \
- F(GetDataProperty, 2, 1) \
- F(GetConstructorName, 1, 1) \
F(HasFastPackedElements, 1, 1) \
F(ValueOf, 1, 1) \
F(IsJSReceiver, 1, 1) \
@@ -437,21 +410,12 @@ namespace internal {
F(HasInPrototypeChain, 2, 1) \
F(CreateIterResultObject, 2, 1) \
F(CreateDataProperty, 3, 1) \
+ F(AddPrivateField, 3, 1) \
F(IterableToListCanBeElided, 1, 1) \
F(GetOwnPropertyDescriptor, 2, 1)
#define FOR_EACH_INTRINSIC_OPERATORS(F) \
- F(Multiply, 2, 1) \
- F(Divide, 2, 1) \
- F(Modulus, 2, 1) \
F(Add, 2, 1) \
- F(Subtract, 2, 1) \
- F(ShiftLeft, 2, 1) \
- F(ShiftRight, 2, 1) \
- F(ShiftRightLogical, 2, 1) \
- F(BitwiseAnd, 2, 1) \
- F(BitwiseOr, 2, 1) \
- F(BitwiseXor, 2, 1) \
F(Equal, 2, 1) \
F(NotEqual, 2, 1) \
F(StrictEqual, 2, 1) \
@@ -459,13 +423,11 @@ namespace internal {
F(LessThan, 2, 1) \
F(GreaterThan, 2, 1) \
F(LessThanOrEqual, 2, 1) \
- F(GreaterThanOrEqual, 2, 1) \
- F(InstanceOf, 2, 1)
+ F(GreaterThanOrEqual, 2, 1)
#define FOR_EACH_INTRINSIC_PROMISE(F) \
F(EnqueueMicrotask, 1, 1) \
F(PromiseHookInit, 2, 1) \
- F(PromiseHookResolve, 1, 1) \
F(PromiseHookBefore, 1, 1) \
F(PromiseHookAfter, 1, 1) \
F(PromiseMarkAsHandled, 1, 1) \
@@ -473,7 +435,8 @@ namespace internal {
F(PromiseRevokeReject, 1, 1) \
F(PromiseResult, 1, 1) \
F(PromiseStatus, 1, 1) \
- F(ReportPromiseReject, 2, 1)
+ F(RejectPromise, 3, 1) \
+ F(ResolvePromise, 2, 1)
#define FOR_EACH_INTRINSIC_PROXY(F) \
F(IsJSProxy, 1, 1) \
@@ -488,7 +451,6 @@ namespace internal {
F(IsRegExp, 1, 1) \
F(RegExpExec, 4, 1) \
F(RegExpExecMultiple, 4, 1) \
- F(RegExpExecReThrow, 0, 1) \
F(RegExpInitializeAndCompile, 3, 1) \
F(RegExpInternalReplace, 3, 1) \
F(RegExpReplace, 3, 1) \
@@ -507,8 +469,8 @@ namespace internal {
F(NewRestParameter, 1, 1) \
F(NewSloppyArguments, 3, 1) \
F(NewArgumentsElements, 3, 1) \
- F(NewClosure, 3, 1) \
- F(NewClosure_Tenured, 3, 1) \
+ F(NewClosure, 2, 1) \
+ F(NewClosure_Tenured, 2, 1) \
F(NewScriptContext, 2, 1) \
F(NewFunctionContext, 2, 1) \
F(PushModuleContext, 3, 1) \
@@ -530,7 +492,7 @@ namespace internal {
F(StringIndexOf, 3, 1) \
F(StringIndexOfUnchecked, 3, 1) \
F(StringLastIndexOf, 2, 1) \
- F(SubString, 3, 1) \
+ F(StringSubstring, 3, 1) \
F(StringAdd, 2, 1) \
F(InternalizeString, 1, 1) \
F(StringCharCodeAt, 2, 1) \
@@ -548,106 +510,103 @@ namespace internal {
F(StringCharFromCode, 1, 1) \
F(StringMaxLength, 0, 1)
-#define FOR_EACH_INTRINSIC_SYMBOL(F) \
- F(CreateSymbol, 1, 1) \
- F(CreatePrivateSymbol, 1, 1) \
- F(SymbolDescription, 1, 1) \
- F(SymbolDescriptiveString, 1, 1) \
+#define FOR_EACH_INTRINSIC_SYMBOL(F) \
+ F(CreatePrivateSymbol, -1 /* <= 1 */, 1) \
+ F(CreatePrivateFieldSymbol, 0, 1) \
+ F(SymbolDescription, 1, 1) \
+ F(SymbolDescriptiveString, 1, 1) \
F(SymbolIsPrivate, 1, 1)
#define FOR_EACH_INTRINSIC_TEST(F) \
- F(ConstructDouble, 2, 1) \
+ F(Abort, 1, 1) \
+ F(AbortJS, 1, 1) \
+ F(ClearFunctionFeedback, 1, 1) \
+ F(CompleteInobjectSlackTracking, 1, 1) \
F(ConstructConsString, 2, 1) \
+ F(ConstructDouble, 2, 1) \
+ F(DebugPrint, 1, 1) \
+ F(DebugTrace, 0, 1) \
+ F(DebugTrackRetainingPath, -1, 1) \
F(DeoptimizeFunction, 1, 1) \
F(DeoptimizeNow, 0, 1) \
- F(RunningInSimulator, 0, 1) \
- F(IsConcurrentRecompilationSupported, 0, 1) \
- F(OptimizeFunctionOnNextCall, -1, 1) \
- F(TypeProfile, 1, 1) \
- F(OptimizeOsr, -1, 1) \
- F(NeverOptimizeFunction, 1, 1) \
- F(GetOptimizationStatus, -1, 1) \
- F(UnblockConcurrentRecompilation, 0, 1) \
+ F(DeserializeWasmModule, 2, 1) \
+ F(DisallowCodegenFromStrings, 1, 1) \
+ F(DisallowWasmCodegen, 1, 1) \
+ F(DisassembleFunction, 1, 1) \
+ F(FreezeWasmLazyCompilation, 1, 1) \
+ F(GetCallable, 0, 1) \
F(GetDeoptCount, 1, 1) \
+ F(GetOptimizationStatus, -1, 1) \
F(GetUndetectable, 0, 1) \
- F(GetCallable, 0, 1) \
- F(ClearFunctionFeedback, 1, 1) \
- F(CheckWasmWrapperElision, 2, 1) \
- F(NotifyContextDisposed, 0, 1) \
- F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
- F(DebugPrint, 1, 1) \
- F(DebugTrace, 0, 1) \
- F(DebugTrackRetainingPath, -1, 1) \
- F(PrintWithNameForAssert, 2, 1) \
- F(GetExceptionDetails, 1, 1) \
+ F(GetWasmRecoveredTrapCount, 0, 1) \
F(GlobalPrint, 1, 1) \
- F(SystemBreak, 0, 1) \
- F(SetFlags, 1, 1) \
- F(Abort, 1, 1) \
- F(AbortJS, 1, 1) \
- F(NativeScriptsCount, 0, 1) \
- F(DisassembleFunction, 1, 1) \
- F(TraceEnter, 0, 1) \
- F(TraceExit, 1, 1) \
- F(HaveSameMap, 2, 1) \
- F(InNewSpace, 1, 1) \
- F(HasFastElements, 1, 1) \
- F(HasSmiElements, 1, 1) \
- F(HasObjectElements, 1, 1) \
- F(HasSmiOrObjectElements, 1, 1) \
- F(HasDoubleElements, 1, 1) \
- F(HasHoleyElements, 1, 1) \
F(HasDictionaryElements, 1, 1) \
- F(HasSloppyArgumentsElements, 1, 1) \
- F(HasFixedTypedArrayElements, 1, 1) \
+ F(HasDoubleElements, 1, 1) \
+ F(HasFastElements, 1, 1) \
F(HasFastProperties, 1, 1) \
- F(HasFixedUint8Elements, 1, 1) \
+ F(HasFixedBigInt64Elements, 1, 1) \
+ F(HasFixedBigUint64Elements, 1, 1) \
+ F(HasFixedFloat32Elements, 1, 1) \
+ F(HasFixedFloat64Elements, 1, 1) \
+ F(HasFixedInt16Elements, 1, 1) \
+ F(HasFixedInt32Elements, 1, 1) \
F(HasFixedInt8Elements, 1, 1) \
F(HasFixedUint16Elements, 1, 1) \
- F(HasFixedInt16Elements, 1, 1) \
F(HasFixedUint32Elements, 1, 1) \
- F(HasFixedInt32Elements, 1, 1) \
- F(HasFixedFloat32Elements, 1, 1) \
- F(HasFixedFloat64Elements, 1, 1) \
F(HasFixedUint8ClampedElements, 1, 1) \
- F(SpeciesProtector, 0, 1) \
- F(SerializeWasmModule, 1, 1) \
- F(DeserializeWasmModule, 2, 1) \
+ F(HasFixedUint8Elements, 1, 1) \
+ F(HasHoleyElements, 1, 1) \
+ F(IsJSError, 1, 1) \
+ F(IsJSGeneratorObject, 1, 1) \
+ F(IsJSMapIterator, 1, 1) \
+ F(IsScriptWrapper, 1, 1) \
+ F(IsJSSetIterator, 1, 1) \
+ F(HasObjectElements, 1, 1) \
+ F(HasSloppyArgumentsElements, 1, 1) \
+ F(HasSmiElements, 1, 1) \
+ F(HasSmiOrObjectElements, 1, 1) \
+ F(HaveSameMap, 2, 1) \
+ F(HeapObjectVerify, 1, 1) \
+ F(InNewSpace, 1, 1) \
F(IsAsmWasmCode, 1, 1) \
+ F(IsConcurrentRecompilationSupported, 0, 1) \
+ F(IsLiftoffFunction, 1, 1) \
F(IsWasmCode, 1, 1) \
F(IsWasmTrapHandlerEnabled, 0, 1) \
- F(GetWasmRecoveredTrapCount, 0, 1) \
- F(DisallowCodegenFromStrings, 1, 1) \
- F(DisallowWasmCodegen, 1, 1) \
+ F(NativeScriptsCount, 0, 1) \
+ F(NeverOptimizeFunction, 1, 1) \
+ F(NotifyContextDisposed, 0, 1) \
+ F(OptimizeFunctionOnNextCall, -1, 1) \
+ F(OptimizeOsr, -1, 1) \
+ F(PrintWithNameForAssert, 2, 1) \
+ F(RedirectToWasmInterpreter, 2, 1) \
+ F(RunningInSimulator, 0, 1) \
+ F(SerializeWasmModule, 1, 1) \
+ F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
+ F(SetFlags, 1, 1) \
+ F(SetForceSlowPath, 1, 1) \
+ F(SetWasmCompileControls, 2, 1) \
+ F(SetWasmInstantiateControls, 0, 1) \
+ F(SpeciesProtector, 0, 1) \
+ F(SystemBreak, 0, 1) \
+ F(TraceEnter, 0, 1) \
+ F(TraceExit, 1, 1) \
+ F(UnblockConcurrentRecompilation, 0, 1) \
F(ValidateWasmInstancesChain, 2, 1) \
F(ValidateWasmModuleState, 1, 1) \
F(ValidateWasmOrphanedInstance, 1, 1) \
- F(SetWasmCompileControls, 2, 1) \
- F(SetWasmInstantiateControls, 0, 1) \
- F(HeapObjectVerify, 1, 1) \
F(WasmNumInterpretedCalls, 1, 1) \
- F(RedirectToWasmInterpreter, 2, 1) \
- F(WasmTraceMemory, 1, 1) \
- F(CompleteInobjectSlackTracking, 1, 1) \
- F(IsLiftoffFunction, 1, 1) \
- F(FreezeWasmLazyCompilation, 1, 1)
+ F(WasmTraceMemory, 1, 1)
#define FOR_EACH_INTRINSIC_TYPEDARRAY(F) \
- F(ArrayBufferGetByteLength, 1, 1) \
F(ArrayBufferNeuter, 1, 1) \
F(TypedArrayCopyElements, 3, 1) \
- F(ArrayBufferViewGetByteLength, 1, 1) \
- F(ArrayBufferViewGetByteOffset, 1, 1) \
F(ArrayBufferViewWasNeutered, 1, 1) \
F(TypedArrayGetLength, 1, 1) \
F(TypedArrayGetBuffer, 1, 1) \
F(TypedArraySortFast, 1, 1) \
F(TypedArraySet, 2, 1) \
- F(IsTypedArray, 1, 1) \
- F(IsSharedTypedArray, 1, 1) \
- F(IsSharedIntegerTypedArray, 1, 1) \
- F(IsSharedInteger32TypedArray, 1, 1) \
- F(TypedArraySpeciesCreateByLength, 2, 1)
+ F(IsTypedArray, 1, 1)
#define FOR_EACH_INTRINSIC_WASM(F) \
F(WasmGrowMemory, 1, 1) \
@@ -683,8 +642,7 @@ namespace internal {
F(StoreGlobalIC_Miss, 4, 1) \
F(StoreGlobalIC_Slow, 5, 1) \
F(StoreIC_Miss, 5, 1) \
- F(StorePropertyWithInterceptor, 5, 1) \
- F(Unreachable, 0, 1)
+ F(StorePropertyWithInterceptor, 5, 1)
#define FOR_EACH_INTRINSIC_RETURN_OBJECT(F) \
FOR_EACH_INTRINSIC_IC(F) \
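
All of the FOR_EACH_INTRINSIC_* edits above are changes to X-macro lists: each list is expanded several times with different definitions of F to generate the intrinsic id enum, the name/arity tables, and so on, which is why adding or removing an intrinsic is a one-line change in this header. The second argument is the expected argument count, with -1 meaning variadic (the /* >= 3 */ style comments record the lower bound). A small self-contained C++ sketch of the technique, with illustrative names rather than V8's actual expansion sites:

#include <cstdio>

// X-macro list in the same shape as runtime.h: F(Name, nargs, nresults),
// where nargs == -1 marks a variadic intrinsic.
#define FOR_EACH_DEMO_INTRINSIC(F) \
  F(Add, 2, 1)                     \
  F(Typeof, 1, 1)                  \
  F(ThrowTypeError, -1, 1)

// Expansion 1: an enum of intrinsic ids.
enum class IntrinsicId {
#define DEFINE_ID(Name, nargs, nresults) k##Name,
  FOR_EACH_DEMO_INTRINSIC(DEFINE_ID)
#undef DEFINE_ID
};

// Expansion 2: a table of names and arities built from the same list.
struct IntrinsicEntry { const char* name; int nargs; int nresults; };
constexpr IntrinsicEntry kIntrinsics[] = {
#define DEFINE_ENTRY(Name, nargs, nresults) {#Name, nargs, nresults},
    FOR_EACH_DEMO_INTRINSIC(DEFINE_ENTRY)
#undef DEFINE_ENTRY
};

int main() {
  for (const auto& e : kIntrinsics)
    std::printf("%s takes %d arg(s)\n", e.name, e.nargs);
  return 0;
}
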
diff --git a/deps/v8/src/s390/assembler-s390-inl.h b/deps/v8/src/s390/assembler-s390-inl.h
index 6323730b99..eac58186d5 100644
--- a/deps/v8/src/s390/assembler-s390-inl.h
+++ b/deps/v8/src/s390/assembler-s390-inl.h
@@ -46,7 +46,7 @@
namespace v8 {
namespace internal {
-bool CpuFeatures::SupportsCrankshaft() { return true; }
+bool CpuFeatures::SupportsOptimizer() { return true; }
bool CpuFeatures::SupportsWasmSimd128() { return false; }
@@ -70,8 +70,8 @@ void RelocInfo::apply(intptr_t delta) {
// mov sequence
DCHECK(IsInternalReferenceEncoded(rmode_));
Address target = Assembler::target_address_at(pc_, constant_pool_);
- Assembler::set_target_address_at(nullptr, pc_, constant_pool_,
- target + delta, SKIP_ICACHE_FLUSH);
+ Assembler::set_target_address_at(pc_, constant_pool_, target + delta,
+ SKIP_ICACHE_FLUSH);
}
}
@@ -159,7 +159,7 @@ void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(target->GetIsolate(), pc_, constant_pool_,
+ Assembler::set_target_address_at(pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
@@ -179,15 +179,15 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
return target_address();
}
-void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
+void RelocInfo::set_target_runtime_entry(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target)
- set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
}
-void RelocInfo::WipeOut(Isolate* isolate) {
+void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
@@ -197,15 +197,15 @@ void RelocInfo::WipeOut(Isolate* isolate) {
} else if (IsInternalReferenceEncoded(rmode_)) {
// mov sequence
// Currently used only by deserializer, no need to flush.
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr,
+ Assembler::set_target_address_at(pc_, constant_pool_, nullptr,
SKIP_ICACHE_FLUSH);
} else {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr);
+ Assembler::set_target_address_at(pc_, constant_pool_, nullptr);
}
}
template <typename ObjectVisitor>
-void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);
@@ -221,7 +221,7 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
}
// Operand constructors
-Operand::Operand(Register rm) : rm_(rm), rmode_(kRelocInfo_NONEPTR) {}
+Operand::Operand(Register rm) : rm_(rm), rmode_(RelocInfo::NONE) {}
int32_t Assembler::emit_code_target(Handle<Code> target,
RelocInfo::Mode rmode) {
@@ -281,26 +281,24 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
// has already deserialized the mov instructions etc.
// There is a FIXED_SEQUENCE assumption here
void Assembler::deserialization_set_special_target_at(
- Isolate* isolate, Address instruction_payload, Code* code, Address target) {
- set_target_address_at(isolate, instruction_payload,
+ Address instruction_payload, Code* code, Address target) {
+ set_target_address_at(instruction_payload,
code ? code->constant_pool() : nullptr, target);
}
void Assembler::deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
+ Address pc, Address target, RelocInfo::Mode mode) {
if (RelocInfo::IsInternalReferenceEncoded(mode)) {
- set_target_address_at(isolate, pc, nullptr, target, SKIP_ICACHE_FLUSH);
+ set_target_address_at(pc, nullptr, target, SKIP_ICACHE_FLUSH);
} else {
Memory::Address_at(pc) = target;
}
}
// This code assumes the FIXED_SEQUENCE of IIHF/IILF
-void Assembler::set_target_address_at(Isolate* isolate, Address pc,
- Address constant_pool, Address target,
+void Assembler::set_target_address_at(Address pc, Address constant_pool,
+ Address target,
ICacheFlushMode icache_flush_mode) {
- DCHECK_IMPLIES(isolate == nullptr, icache_flush_mode == SKIP_ICACHE_FLUSH);
-
// Check for instructions generated by Asm::mov()
Opcode op1 = Instruction::S390OpcodeValue(reinterpret_cast<const byte*>(pc));
SixByteInstr instr_1 =
@@ -315,7 +313,7 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
Instruction::SetInstructionBits<SixByteInstr>(reinterpret_cast<byte*>(pc),
instr_1);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, pc, 6);
+ Assembler::FlushICache(pc, 6);
}
patched = true;
} else {
@@ -344,7 +342,7 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
Instruction::SetInstructionBits<SixByteInstr>(
reinterpret_cast<byte*>(pc + instr1_length), instr_2);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, pc, 12);
+ Assembler::FlushICache(pc, 12);
}
patched = true;
}
@@ -358,7 +356,7 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
Instruction::SetInstructionBits<SixByteInstr>(reinterpret_cast<byte*>(pc),
instr_1);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, pc, 6);
+ Assembler::FlushICache(pc, 6);
}
patched = true;
}
diff --git a/deps/v8/src/s390/assembler-s390.cc b/deps/v8/src/s390/assembler-s390.cc
index 166da1c451..56870fd7c0 100644
--- a/deps/v8/src/s390/assembler-s390.cc
+++ b/deps/v8/src/s390/assembler-s390.cc
@@ -279,22 +279,20 @@ uint32_t RelocInfo::embedded_size() const {
Assembler::target_address_at(pc_, constant_pool_)));
}
-void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
+void RelocInfo::set_embedded_address(Address address,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
- flush_mode);
+ Assembler::set_target_address_at(pc_, constant_pool_, address, flush_mode);
}
-void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
- ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_,
+void RelocInfo::set_embedded_size(uint32_t size, ICacheFlushMode flush_mode) {
+ Assembler::set_target_address_at(pc_, constant_pool_,
reinterpret_cast<Address>(size), flush_mode);
}
-void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
+void RelocInfo::set_js_to_wasm_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- set_embedded_address(isolate, address, icache_flush_mode);
+ set_embedded_address(address, icache_flush_mode);
}
Address RelocInfo::js_to_wasm_address() const {
@@ -336,7 +334,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
case HeapObjectRequest::kHeapNumber:
object = isolate->factory()->NewHeapNumber(request.heap_number(),
IMMUTABLE, TENURED);
- set_target_address_at(nullptr, pc, static_cast<Address>(nullptr),
+ set_target_address_at(pc, static_cast<Address>(nullptr),
reinterpret_cast<Address>(object.location()),
SKIP_ICACHE_FLUSH);
break;
@@ -2224,8 +2222,7 @@ void Assembler::EmitRelocations() {
} else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
// mov sequence
intptr_t pos = reinterpret_cast<intptr_t>(target_address_at(pc, nullptr));
- set_target_address_at(nullptr, pc, nullptr, buffer_ + pos,
- SKIP_ICACHE_FLUSH);
+ set_target_address_at(pc, nullptr, buffer_ + pos, SKIP_ICACHE_FLUSH);
}
reloc_info_writer.Write(&rinfo);
diff --git a/deps/v8/src/s390/assembler-s390.h b/deps/v8/src/s390/assembler-s390.h
index 4a5945de87..b0cc5b8cc4 100644
--- a/deps/v8/src/s390/assembler-s390.h
+++ b/deps/v8/src/s390/assembler-s390.h
@@ -345,12 +345,6 @@ C_REGISTERS(DECLARE_C_REGISTER)
// -----------------------------------------------------------------------------
// Machine instruction Operands
-#if V8_TARGET_ARCH_S390X
-constexpr RelocInfo::Mode kRelocInfo_NONEPTR = RelocInfo::NONE64;
-#else
-constexpr RelocInfo::Mode kRelocInfo_NONEPTR = RelocInfo::NONE32;
-#endif
-
// Class Operand represents a shifter operand in data processing instructions
// defining immediate numbers and masks
typedef uint8_t Length;
@@ -369,7 +363,7 @@ class Operand BASE_EMBEDDED {
public:
// immediate
INLINE(explicit Operand(intptr_t immediate,
- RelocInfo::Mode rmode = kRelocInfo_NONEPTR)
+ RelocInfo::Mode rmode = RelocInfo::NONE)
: rmode_(rmode)) {
value_.immediate = immediate;
}
@@ -379,7 +373,7 @@ class Operand BASE_EMBEDDED {
value_.immediate = reinterpret_cast<intptr_t>(f.address());
}
explicit Operand(Handle<HeapObject> handle);
- INLINE(explicit Operand(Smi* value) : rmode_(kRelocInfo_NONEPTR)) {
+ INLINE(explicit Operand(Smi* value) : rmode_(RelocInfo::NONE)) {
value_.immediate = reinterpret_cast<intptr_t>(value);
}
@@ -555,7 +549,7 @@ class Assembler : public AssemblerBase {
// The isolate argument is unused (and may be nullptr) when skipping flushing.
INLINE(static Address target_address_at(Address pc, Address constant_pool));
INLINE(static void set_target_address_at(
- Isolate* isolate, Address pc, Address constant_pool, Address target,
+ Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
// Return the code target address at a call site from the return address
@@ -570,12 +564,11 @@ class Assembler : public AssemblerBase {
// This sets the branch destination.
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Isolate* isolate, Address instruction_payload, Code* code,
- Address target);
+ Address instruction_payload, Code* code, Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target,
+ Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// Here we are patching the address in the IIHF/IILF instruction pair.
diff --git a/deps/v8/src/s390/code-stubs-s390.cc b/deps/v8/src/s390/code-stubs-s390.cc
index 783b995c72..91396bb597 100644
--- a/deps/v8/src/s390/code-stubs-s390.cc
+++ b/deps/v8/src/s390/code-stubs-s390.cc
@@ -435,6 +435,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&skip);
+ // Reset the masking register.
+ if (FLAG_branch_load_poisoning) {
+ __ ResetSpeculationPoisonRegister();
+ }
+
// Compute the handler entry address and jump to it.
__ mov(r3, Operand(pending_handler_entrypoint_address));
__ LoadP(r3, MemOperand(r3));
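
The reset added above is part of the branch-load-poisoning mitigation: when --branch-load-poisoning is enabled, generated code keeps a dedicated mask register that stays at all-ones on the architecturally correct path and is cleared on paths whose effects should be neutralized, and values loaded from memory are ANDed with it. Re-arming the register to -1 at the exception handler entry point means subsequent loads pass through unchanged. A minimal standalone sketch of the masking idea in plain C++ (this shows the concept only, not the generated s390 code):

#include <cstdint>
#include <cstdio>

// The "speculation poison" mask: all ones on the valid path, all zeros on a
// path whose loads should be neutralized. ResetSpeculationPoisonRegister
// corresponds to restoring the all-ones value.
constexpr uint64_t kPoisonValid = ~uint64_t{0};
constexpr uint64_t kPoisonCleared = 0;

uint64_t LoadWithPoison(const uint64_t* p, uint64_t poison) {
  // Every load result is masked, so a cleared poison turns the data into 0.
  return *p & poison;
}

int main() {
  uint64_t secret = 0xDEADBEEFCAFEF00Dull;
  std::printf("valid path:    %016llx\n",
              static_cast<unsigned long long>(LoadWithPoison(&secret, kPoisonValid)));
  std::printf("poisoned path: %016llx\n",
              static_cast<unsigned long long>(LoadWithPoison(&secret, kPoisonCleared)));
  return 0;
}
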
diff --git a/deps/v8/src/s390/codegen-s390.cc b/deps/v8/src/s390/codegen-s390.cc
index df02570783..ecec9cb408 100644
--- a/deps/v8/src/s390/codegen-s390.cc
+++ b/deps/v8/src/s390/codegen-s390.cc
@@ -33,10 +33,9 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(ABI_USES_FUNCTION_DESCRIPTORS ||
- !RelocInfo::RequiresRelocation(isolate, desc));
+ DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICache(isolate, buffer, allocated);
+ Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
diff --git a/deps/v8/src/s390/disasm-s390.cc b/deps/v8/src/s390/disasm-s390.cc
index de4db00cf1..66d77d1250 100644
--- a/deps/v8/src/s390/disasm-s390.cc
+++ b/deps/v8/src/s390/disasm-s390.cc
@@ -288,6 +288,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + off));
return 8;
}
+ break;
case 'm': {
return FormatMask(instr, format);
}
diff --git a/deps/v8/src/s390/frame-constants-s390.h b/deps/v8/src/s390/frame-constants-s390.h
index 3c2a4c89d3..54638f56bc 100644
--- a/deps/v8/src/s390/frame-constants-s390.h
+++ b/deps/v8/src/s390/frame-constants-s390.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_S390_FRAMES_S390_H_
-#define V8_S390_FRAMES_S390_H_
+#ifndef V8_S390_FRAME_CONSTANTS_S390_H_
+#define V8_S390_FRAME_CONSTANTS_S390_H_
namespace v8 {
namespace internal {
@@ -45,4 +45,4 @@ class JavaScriptFrameConstants : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_S390_FRAMES_S390_H_
+#endif // V8_S390_FRAME_CONSTANTS_S390_H_
diff --git a/deps/v8/src/s390/interface-descriptors-s390.cc b/deps/v8/src/s390/interface-descriptors-s390.cc
index 3cb4f2e375..eae0739361 100644
--- a/deps/v8/src/s390/interface-descriptors-s390.cc
+++ b/deps/v8/src/s390/interface-descriptors-s390.cc
@@ -66,12 +66,6 @@ const Register MathPowIntegerDescriptor::exponent() {
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r2; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r5; }
-void FastNewClosureDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r3, r4, r5};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return r2; }
diff --git a/deps/v8/src/s390/macro-assembler-s390.cc b/deps/v8/src/s390/macro-assembler-s390.cc
index fe24884378..50db39c6b5 100644
--- a/deps/v8/src/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/s390/macro-assembler-s390.cc
@@ -15,6 +15,7 @@
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
+#include "src/instruction-stream.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
@@ -1049,7 +1050,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
// Clear top frame.
mov(ip, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
isolate())));
- StoreP(MemOperand(ip), Operand(0, kRelocInfo_NONEPTR), r0);
+ StoreP(MemOperand(ip), Operand(0, RelocInfo::NONE), r0);
// Restore current context from top and clear it in debug mode.
mov(ip,
@@ -1215,13 +1216,29 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
- Label skip_hook;
+ Label skip_hook, call_hook;
+
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(isolate());
+ mov(r6, Operand(debug_is_active));
+ tm(MemOperand(r6), Operand::Zero());
+ bne(&skip_hook);
+
ExternalReference debug_hook_avtive =
ExternalReference::debug_hook_on_function_call_address(isolate());
mov(r6, Operand(debug_hook_avtive));
- LoadB(r6, MemOperand(r6));
- CmpP(r6, Operand::Zero());
+ tm(MemOperand(r6), Operand::Zero());
+ beq(&call_hook);
+
+ LoadP(r6, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
+ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kDebugInfoOffset));
+ JumpIfSmi(r6, &skip_hook);
+ LoadP(r6, FieldMemOperand(r6, DebugInfo::kFlagsOffset));
+ SmiUntag(r0, r6);
+ tmll(r0, Operand(DebugInfo::kBreakAtEntry));
beq(&skip_hook);
+
+ bind(&call_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
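
The reworked check above avoids calling into the runtime on every invocation: it first tests debug_is_active, then the per-isolate debug_hook_on_function_call flag, and only when that flag is clear does it look at the callee's DebugInfo for a break-at-entry request. The intended control flow, as a self-contained C++ sketch (types, helpers and the flag bit are illustrative stand-ins, not V8's):

#include <cstdint>

struct DebugInfo {
  uint32_t flags;
  static constexpr uint32_t kBreakAtEntry = 1u << 0;  // stand-in for V8's flag bit
};
struct SharedFunctionInfo { const DebugInfo* debug_info; };  // nullptr ~ "still a Smi"
struct JSFunction { const SharedFunctionInfo* shared; };

bool debug_is_active = false;
bool debug_hook_on_function_call = false;

// Mirrors the label structure above: false == skip_hook, true == call_hook.
bool ShouldCallDebugHook(const JSFunction& fun) {
  if (!debug_is_active) return false;                     // -> skip_hook
  if (debug_hook_on_function_call) return true;           // -> call_hook
  const DebugInfo* info = fun.shared->debug_info;
  if (info == nullptr) return false;                      // JumpIfSmi -> skip_hook
  return (info->flags & DebugInfo::kBreakAtEntry) != 0;   // tmll kBreakAtEntry
}
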
@@ -1279,7 +1296,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
- Register code = ip;
+ Register code = kJavaScriptCallCodeStartRegister;
LoadP(code, FieldMemOperand(function, JSFunction::kCodeOffset));
AddP(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
@@ -1332,14 +1349,6 @@ void MacroAssembler::InvokeFunction(Register function,
InvokeFunctionCode(r3, no_reg, expected, actual, flag);
}
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag) {
- Move(r3, function);
- InvokeFunction(r3, expected, actual, flag);
-}
-
void MacroAssembler::MaybeDropFrames() {
// Check whether we need to drop frames to restart a function on the stack.
ExternalReference restart_fp =
@@ -1529,6 +1538,12 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
+void MacroAssembler::JumpToInstructionStream(const InstructionStream* stream) {
+ intptr_t bytes_address = reinterpret_cast<intptr_t>(stream->bytes());
+ mov(kOffHeapTrampolineRegister, Operand(bytes_address));
+ Jump(kOffHeapTrampolineRegister);
+}
+
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK(value > 0 && is_int8(value));
@@ -1947,7 +1962,7 @@ void TurboAssembler::mov(Register dst, const Operand& src) {
value = src.immediate();
}
- if (src.rmode() != kRelocInfo_NONEPTR) {
+ if (src.rmode() != RelocInfo::NONE) {
// some form of relocation needed
RecordRelocInfo(src.rmode(), value);
}
@@ -3166,7 +3181,7 @@ void TurboAssembler::CmpP(Register src1, Register src2) {
// Compare 32-bit Register vs Immediate
// This helper will set up proper relocation entries if required.
void TurboAssembler::Cmp32(Register dst, const Operand& opnd) {
- if (opnd.rmode() == kRelocInfo_NONEPTR) {
+ if (opnd.rmode() == RelocInfo::NONE) {
intptr_t value = opnd.immediate();
if (is_int16(value))
chi(dst, opnd);
@@ -3183,7 +3198,7 @@ void TurboAssembler::Cmp32(Register dst, const Operand& opnd) {
// This helper will set up proper relocation entries if required.
void TurboAssembler::CmpP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
- if (opnd.rmode() == kRelocInfo_NONEPTR) {
+ if (opnd.rmode() == RelocInfo::NONE) {
cgfi(dst, opnd);
} else {
mov(r0, opnd); // Need to generate 64-bit relocation
@@ -3470,7 +3485,7 @@ void TurboAssembler::StoreP(Register src, const MemOperand& mem,
void TurboAssembler::StoreP(const MemOperand& mem, const Operand& opnd,
Register scratch) {
// Relocations not supported
- DCHECK_EQ(opnd.rmode(), kRelocInfo_NONEPTR);
+ DCHECK_EQ(opnd.rmode(), RelocInfo::NONE);
// Try to use MVGHI/MVHI
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_uint12(mem.offset()) &&
@@ -4269,6 +4284,10 @@ bool AreAliased(DoubleRegister reg1, DoubleRegister reg2, DoubleRegister reg3,
}
#endif
+void TurboAssembler::ResetSpeculationPoisonRegister() {
+ mov(kSpeculationPoisonRegister, Operand(-1));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/s390/macro-assembler-s390.h b/deps/v8/src/s390/macro-assembler-s390.h
index fcc62f21a9..1c3ea3fc54 100644
--- a/deps/v8/src/s390/macro-assembler-s390.h
+++ b/deps/v8/src/s390/macro-assembler-s390.h
@@ -14,20 +14,23 @@ namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
-const Register kReturnRegister0 = r2;
-const Register kReturnRegister1 = r3;
-const Register kReturnRegister2 = r4;
-const Register kJSFunctionRegister = r3;
-const Register kContextRegister = r13;
-const Register kAllocateSizeRegister = r3;
-const Register kInterpreterAccumulatorRegister = r2;
-const Register kInterpreterBytecodeOffsetRegister = r6;
-const Register kInterpreterBytecodeArrayRegister = r7;
-const Register kInterpreterDispatchTableRegister = r8;
-const Register kJavaScriptCallArgCountRegister = r2;
-const Register kJavaScriptCallNewTargetRegister = r5;
-const Register kRuntimeCallFunctionRegister = r3;
-const Register kRuntimeCallArgCountRegister = r2;
+constexpr Register kReturnRegister0 = r2;
+constexpr Register kReturnRegister1 = r3;
+constexpr Register kReturnRegister2 = r4;
+constexpr Register kJSFunctionRegister = r3;
+constexpr Register kContextRegister = r13;
+constexpr Register kAllocateSizeRegister = r3;
+constexpr Register kSpeculationPoisonRegister = r9;
+constexpr Register kInterpreterAccumulatorRegister = r2;
+constexpr Register kInterpreterBytecodeOffsetRegister = r6;
+constexpr Register kInterpreterBytecodeArrayRegister = r7;
+constexpr Register kInterpreterDispatchTableRegister = r8;
+constexpr Register kJavaScriptCallArgCountRegister = r2;
+constexpr Register kJavaScriptCallNewTargetRegister = r5;
+constexpr Register kJavaScriptCallCodeStartRegister = r4;
+constexpr Register kOffHeapTrampolineRegister = ip;
+constexpr Register kRuntimeCallFunctionRegister = r3;
+constexpr Register kRuntimeCallArgCountRegister = r2;
// ----------------------------------------------------------------------------
// Static helper functions
@@ -1001,6 +1004,8 @@ class TurboAssembler : public Assembler {
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met);
+ void ResetSpeculationPoisonRegister();
+
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
@@ -1082,6 +1087,9 @@ class MacroAssembler : public TurboAssembler {
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
+ // Generates a trampoline to jump to the off-heap instruction stream.
+ void JumpToInstructionStream(const InstructionStream* stream);
+
// Compare the object in a register to a value and jump if they are equal.
void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
CompareRoot(with, index);
@@ -1139,10 +1147,6 @@ class MacroAssembler : public TurboAssembler {
void InvokeFunction(Register function, const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
- void InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag);
-
// Frame restart support
void MaybeDropFrames();
diff --git a/deps/v8/src/s390/simulator-s390.cc b/deps/v8/src/s390/simulator-s390.cc
index f6754bdd4b..73ca0d5a8a 100644
--- a/deps/v8/src/s390/simulator-s390.cc
+++ b/deps/v8/src/s390/simulator-s390.cc
@@ -640,7 +640,7 @@ void S390Debugger::Debug() {
#undef XSTR
}
-static bool ICacheMatch(void* one, void* two) {
+bool Simulator::ICacheMatch(void* one, void* two) {
DCHECK_EQ(reinterpret_cast<intptr_t>(one) & CachePage::kPageMask, 0);
DCHECK_EQ(reinterpret_cast<intptr_t>(two) & CachePage::kPageMask, 0);
return one == two;
@@ -1488,11 +1488,6 @@ void Simulator::EvalTableInit() {
} // NOLINT
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
- i_cache_ = isolate_->simulator_i_cache();
- if (i_cache_ == nullptr) {
- i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
- isolate_->set_simulator_i_cache(i_cache_);
- }
static base::OnceType once = V8_ONCE_INIT;
base::CallOnce(&once, &Simulator::EvalTableInit);
// Set up simulator support first. Some of this information is needed to
@@ -2332,7 +2327,7 @@ void Simulator::ExecuteInstruction(Instruction* instr, bool auto_incr_pc) {
icount_++;
if (v8::internal::FLAG_check_icache) {
- CheckICache(isolate_->simulator_i_cache(), instr);
+ CheckICache(i_cache(), instr);
}
pc_modified_ = false;
diff --git a/deps/v8/src/s390/simulator-s390.h b/deps/v8/src/s390/simulator-s390.h
index 1ff8020e6a..7c4eb74b6c 100644
--- a/deps/v8/src/s390/simulator-s390.h
+++ b/deps/v8/src/s390/simulator-s390.h
@@ -187,6 +187,7 @@ class Simulator : public SimulatorBase {
static void SetRedirectInstruction(Instruction* instruction);
// ICache checking.
+ static bool ICacheMatch(void* one, void* two);
static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
size_t size);
@@ -425,9 +426,6 @@ class Simulator : public SimulatorBase {
// Debugger input.
char* last_debugger_input_;
- // Icache simulation
- base::CustomMatcherHashMap* i_cache_;
-
// Registered breakpoints.
Instruction* break_pc_;
Instr break_instr_;
diff --git a/deps/v8/src/safepoint-table.cc b/deps/v8/src/safepoint-table.cc
index 83031a2f36..176693d2aa 100644
--- a/deps/v8/src/safepoint-table.cc
+++ b/deps/v8/src/safepoint-table.cc
@@ -51,7 +51,7 @@ SafepointTable::SafepointTable(Address instruction_start,
}
SafepointTable::SafepointTable(Code* code)
- : SafepointTable(code->instruction_start(), code->safepoint_table_offset(),
+ : SafepointTable(code->InstructionStart(), code->safepoint_table_offset(),
code->stack_slots(), true) {}
unsigned SafepointTable::find_return_pc(unsigned pc_offset) {
diff --git a/deps/v8/src/simulator-base.cc b/deps/v8/src/simulator-base.cc
index 72a5daefce..f075ad72ac 100644
--- a/deps/v8/src/simulator-base.cc
+++ b/deps/v8/src/simulator-base.cc
@@ -20,9 +20,21 @@ base::Mutex* SimulatorBase::redirection_mutex_ = nullptr;
Redirection* SimulatorBase::redirection_ = nullptr;
// static
+base::Mutex* SimulatorBase::i_cache_mutex_ = nullptr;
+
+// static
+base::CustomMatcherHashMap* SimulatorBase::i_cache_ = nullptr;
+
+// static
void SimulatorBase::InitializeOncePerProcess() {
DCHECK_NULL(redirection_mutex_);
redirection_mutex_ = new base::Mutex();
+
+ DCHECK_NULL(i_cache_mutex_);
+ i_cache_mutex_ = new base::Mutex();
+
+ DCHECK_NULL(i_cache_);
+ i_cache_ = new base::CustomMatcherHashMap(&Simulator::ICacheMatch);
}
// static
@@ -32,40 +44,40 @@ void SimulatorBase::GlobalTearDown() {
Redirection::DeleteChain(redirection_);
redirection_ = nullptr;
-}
-// static
-void SimulatorBase::Initialize(Isolate* isolate) {
- ExternalReference::set_redirector(isolate, &RedirectExternalReference);
-}
+ delete i_cache_mutex_;
+ i_cache_mutex_ = nullptr;
-// static
-void SimulatorBase::TearDown(base::CustomMatcherHashMap* i_cache) {
- if (i_cache != nullptr) {
- for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
- entry = i_cache->Next(entry)) {
+ if (i_cache_ != nullptr) {
+ for (base::HashMap::Entry* entry = i_cache_->Start(); entry != nullptr;
+ entry = i_cache_->Next(entry)) {
delete static_cast<CachePage*>(entry->value);
}
- delete i_cache;
}
+ delete i_cache_;
+ i_cache_ = nullptr;
+}
+
+// static
+void SimulatorBase::Initialize(Isolate* isolate) {
+ ExternalReference::set_redirector(isolate, &RedirectExternalReference);
}
// static
-void* SimulatorBase::RedirectExternalReference(Isolate* isolate,
- void* external_function,
+void* SimulatorBase::RedirectExternalReference(void* external_function,
ExternalReference::Type type) {
base::LockGuard<base::Mutex> lock_guard(Simulator::redirection_mutex());
- Redirection* redirection = Redirection::Get(isolate, external_function, type);
+ Redirection* redirection = Redirection::Get(external_function, type);
return redirection->address_of_instruction();
}
-Redirection::Redirection(Isolate* isolate, void* external_function,
- ExternalReference::Type type)
+Redirection::Redirection(void* external_function, ExternalReference::Type type)
: external_function_(external_function), type_(type), next_(nullptr) {
next_ = Simulator::redirection();
+ base::LockGuard<base::Mutex> lock_guard(Simulator::i_cache_mutex());
Simulator::SetRedirectInstruction(
reinterpret_cast<Instruction*>(address_of_instruction()));
- Simulator::FlushICache(isolate->simulator_i_cache(),
+ Simulator::FlushICache(Simulator::i_cache(),
reinterpret_cast<void*>(&instruction_),
sizeof(instruction_));
Simulator::set_redirection(this);
@@ -77,7 +89,7 @@ Redirection::Redirection(Isolate* isolate, void* external_function,
}
// static
-Redirection* Redirection::Get(Isolate* isolate, void* external_function,
+Redirection* Redirection::Get(void* external_function,
ExternalReference::Type type) {
Redirection* current = Simulator::redirection();
for (; current != nullptr; current = current->next_) {
@@ -86,7 +98,7 @@ Redirection* Redirection::Get(Isolate* isolate, void* external_function,
return current;
}
}
- return new Redirection(isolate, external_function, type);
+ return new Redirection(external_function, type);
}
} // namespace internal
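
With this change the simulator's instruction cache is no longer stored per Isolate: InitializeOncePerProcess creates one process-wide hash map plus a mutex, GlobalTearDown deletes both, and writers (such as the Redirection constructor above) take i_cache_mutex_ before touching the cache. The shape of that pattern, as a self-contained sketch with illustrative names rather than the simulator's real types:

#include <cstdint>
#include <mutex>
#include <unordered_set>

// One cache and one lock for the whole process, shared by every isolate.
class ProcessWideICache {
 public:
  static void InitializeOncePerProcess() {
    mutex_ = new std::mutex();
    cache_ = new std::unordered_set<uintptr_t>();
  }
  static void GlobalTearDown() {
    delete cache_;  cache_ = nullptr;
    delete mutex_;  mutex_ = nullptr;
  }
  static void Flush(uintptr_t start, size_t size) {
    // All mutation goes through the process-wide lock instead of relying on
    // per-isolate ownership of the cache.
    std::lock_guard<std::mutex> guard(*mutex_);
    for (size_t off = 0; off < size; ++off) cache_->erase(start + off);
  }

 private:
  static std::mutex* mutex_;
  static std::unordered_set<uintptr_t>* cache_;
};

std::mutex* ProcessWideICache::mutex_ = nullptr;
std::unordered_set<uintptr_t>* ProcessWideICache::cache_ = nullptr;
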
diff --git a/deps/v8/src/simulator-base.h b/deps/v8/src/simulator-base.h
index 84c1f2fd5b..47a6b1a52c 100644
--- a/deps/v8/src/simulator-base.h
+++ b/deps/v8/src/simulator-base.h
@@ -24,26 +24,28 @@ class SimulatorBase {
static void InitializeOncePerProcess();
static void GlobalTearDown();
- // Call on isolate initialization and teardown.
+ // Call on isolate initialization.
static void Initialize(Isolate* isolate);
- static void TearDown(base::CustomMatcherHashMap* i_cache);
static base::Mutex* redirection_mutex() { return redirection_mutex_; }
static Redirection* redirection() { return redirection_; }
static void set_redirection(Redirection* r) { redirection_ = r; }
+ static base::Mutex* i_cache_mutex() { return i_cache_mutex_; }
+ static base::CustomMatcherHashMap* i_cache() { return i_cache_; }
+
protected:
template <typename Return, typename SimT, typename CallImpl, typename... Args>
static Return VariadicCall(SimT* sim, CallImpl call, byte* entry,
Args... args) {
// Convert all arguments to intptr_t. Fails if any argument is not integral
// or pointer.
- std::array<intptr_t, sizeof...(args)> args_arr{ConvertArg(args)...};
+ std::array<intptr_t, sizeof...(args)> args_arr{{ConvertArg(args)...}};
intptr_t ret = (sim->*call)(entry, args_arr.size(), args_arr.data());
return ConvertReturn<Return>(ret);
}
- // Convert back integral return types.
+ // Convert back integral return types. This is always a narrowing conversion.
template <typename T>
static typename std::enable_if<std::is_integral<T>::value, T>::type
ConvertReturn(intptr_t ret) {
@@ -64,14 +66,16 @@ class SimulatorBase {
intptr_t ret) {}
private:
- // Runtime call support. Uses the isolate in a thread-safe way.
- static void* RedirectExternalReference(Isolate* isolate,
- void* external_function,
+ // Runtime call support.
+ static void* RedirectExternalReference(void* external_function,
ExternalReference::Type type);
static base::Mutex* redirection_mutex_;
static Redirection* redirection_;
+ static base::Mutex* i_cache_mutex_;
+ static base::CustomMatcherHashMap* i_cache_;
+
// Helper methods to convert arbitrary integer or pointer arguments to the
// needed generic argument type intptr_t.
@@ -80,7 +84,16 @@ class SimulatorBase {
static typename std::enable_if<std::is_integral<T>::value, intptr_t>::type
ConvertArg(T arg) {
static_assert(sizeof(T) <= sizeof(intptr_t), "type bigger than ptrsize");
+#if V8_TARGET_ARCH_MIPS64
+ // The MIPS64 calling convention is to sign extend all values, even unsigned
+ // ones.
+ using signed_t = typename std::make_signed<T>::type;
+ return static_cast<intptr_t>(static_cast<signed_t>(arg));
+#else
+  // Standard C++ conversion: Sign-extend signed values, zero-extend unsigned
+ // values.
return static_cast<intptr_t>(arg);
+#endif
}
// Convert pointer-typed argument to intptr_t.
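
The MIPS64 branch above matters because the n64 calling convention expects 32-bit values in 64-bit integer registers to be sign-extended regardless of their C-level signedness, so a plain static_cast<intptr_t> of a uint32_t (which zero-extends) would hand the simulated callee a register value the real ABI would never produce. A small worked example of the difference, standalone C++ assuming a 64-bit intptr_t:

#include <cstdint>
#include <cstdio>
#include <type_traits>

int main() {
  uint32_t arg = 0x80000000u;  // high bit set

  // Default conversion: zero-extend the unsigned value.
  intptr_t zero_extended = static_cast<intptr_t>(arg);

  // MIPS64-style conversion: reinterpret as signed first, then sign-extend.
  using signed_t = std::make_signed<uint32_t>::type;
  intptr_t sign_extended = static_cast<intptr_t>(static_cast<signed_t>(arg));

  std::printf("zero-extended: %016llx\n",
              static_cast<unsigned long long>(zero_extended));  // 0000000080000000
  std::printf("sign-extended: %016llx\n",
              static_cast<unsigned long long>(sign_extended));  // ffffffff80000000
  return 0;
}
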
@@ -108,8 +121,7 @@ class SimulatorBase {
// - V8_TARGET_ARCH_S390: svc (Supervisor Call)
class Redirection {
public:
- Redirection(Isolate* isolate, void* external_function,
- ExternalReference::Type type);
+ Redirection(void* external_function, ExternalReference::Type type);
Address address_of_instruction() {
#if ABI_USES_FUNCTION_DESCRIPTORS
@@ -122,7 +134,7 @@ class Redirection {
void* external_function() { return external_function_; }
ExternalReference::Type type() { return type_; }
- static Redirection* Get(Isolate* isolate, void* external_function,
+ static Redirection* Get(void* external_function,
ExternalReference::Type type);
static Redirection* FromInstruction(Instruction* instruction) {
diff --git a/deps/v8/src/snapshot/builtin-deserializer.cc b/deps/v8/src/snapshot/builtin-deserializer.cc
index 53a0f30612..e8e086fca3 100644
--- a/deps/v8/src/snapshot/builtin-deserializer.cc
+++ b/deps/v8/src/snapshot/builtin-deserializer.cc
@@ -109,14 +109,35 @@ void BuiltinDeserializer::DeserializeEagerBuiltinsAndHandlers() {
Code* BuiltinDeserializer::DeserializeBuiltin(int builtin_id) {
allocator()->ReserveAndInitializeBuiltinsTableForBuiltin(builtin_id);
DisallowHeapAllocation no_gc;
- return DeserializeBuiltinRaw(builtin_id);
+ Code* code = DeserializeBuiltinRaw(builtin_id);
+
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_builtin_code) {
+ DCHECK(isolate()->builtins()->is_initialized());
+ OFStream os(stdout);
+ code->Disassemble(Builtins::name(builtin_id), os);
+ os << std::flush;
+ }
+#endif // ENABLE_DISASSEMBLER
+
+ return code;
}
Code* BuiltinDeserializer::DeserializeHandler(Bytecode bytecode,
OperandScale operand_scale) {
allocator()->ReserveForHandler(bytecode, operand_scale);
DisallowHeapAllocation no_gc;
- return DeserializeHandlerRaw(bytecode, operand_scale);
+ Code* code = DeserializeHandlerRaw(bytecode, operand_scale);
+
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_builtin_code) {
+ OFStream os(stdout);
+ code->Disassemble(Bytecodes::ToString(bytecode), os);
+ os << std::flush;
+ }
+#endif // ENABLE_DISASSEMBLER
+
+ return code;
}
Code* BuiltinDeserializer::DeserializeBuiltinRaw(int builtin_id) {
@@ -136,8 +157,7 @@ Code* BuiltinDeserializer::DeserializeBuiltinRaw(int builtin_id) {
// Flush the instruction cache.
Code* code = Code::cast(o);
- Assembler::FlushICache(isolate(), code->instruction_start(),
- code->instruction_size());
+ Assembler::FlushICache(code->instruction_start(), code->instruction_size());
return code;
}
@@ -161,8 +181,7 @@ Code* BuiltinDeserializer::DeserializeHandlerRaw(Bytecode bytecode,
// Flush the instruction cache.
Code* code = Code::cast(o);
- Assembler::FlushICache(isolate(), code->instruction_start(),
- code->instruction_size());
+ Assembler::FlushICache(code->instruction_start(), code->instruction_size());
return code;
}
diff --git a/deps/v8/src/snapshot/builtin-deserializer.h b/deps/v8/src/snapshot/builtin-deserializer.h
index 38ba2fecea..1ae49686b8 100644
--- a/deps/v8/src/snapshot/builtin-deserializer.h
+++ b/deps/v8/src/snapshot/builtin-deserializer.h
@@ -56,7 +56,8 @@ class BuiltinDeserializer final
// BuiltinDeserializer implements its own builtin iteration logic. Make sure
// the RootVisitor API is not used accidentally.
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
UNREACHABLE();
}
diff --git a/deps/v8/src/snapshot/builtin-serializer.cc b/deps/v8/src/snapshot/builtin-serializer.cc
index 893c79c05e..0109a85b6b 100644
--- a/deps/v8/src/snapshot/builtin-serializer.cc
+++ b/deps/v8/src/snapshot/builtin-serializer.cc
@@ -65,8 +65,8 @@ void BuiltinSerializer::SerializeBuiltinsAndHandlers() {
sink_.PutRaw(data, data_length, "BuiltinOffsets");
}
-void BuiltinSerializer::VisitRootPointers(Root root, Object** start,
- Object** end) {
+void BuiltinSerializer::VisitRootPointers(Root root, const char* description,
+ Object** start, Object** end) {
UNREACHABLE(); // We iterate manually in SerializeBuiltins.
}
diff --git a/deps/v8/src/snapshot/builtin-serializer.h b/deps/v8/src/snapshot/builtin-serializer.h
index bb8bbdebfa..abc8be74e5 100644
--- a/deps/v8/src/snapshot/builtin-serializer.h
+++ b/deps/v8/src/snapshot/builtin-serializer.h
@@ -28,7 +28,8 @@ class BuiltinSerializer : public Serializer<BuiltinSerializerAllocator> {
void SerializeBuiltinsAndHandlers();
private:
- void VisitRootPointers(Root root, Object** start, Object** end) override;
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override;
void SerializeBuiltin(Code* code);
void SerializeHandler(Code* code);
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index 4210845573..8126e9ee2c 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -22,6 +22,17 @@
namespace v8 {
namespace internal {
+ScriptData::ScriptData(const byte* data, int length)
+ : owns_data_(false), rejected_(false), data_(data), length_(length) {
+ if (!IsAligned(reinterpret_cast<intptr_t>(data), kPointerAlignment)) {
+ byte* copy = NewArray<byte>(length);
+ DCHECK(IsAligned(reinterpret_cast<intptr_t>(copy), kPointerAlignment));
+ CopyBytes(copy, data, length);
+ data_ = copy;
+ AcquireDataOwnership();
+ }
+}
+
ScriptData* CodeSerializer::Serialize(Isolate* isolate,
Handle<SharedFunctionInfo> info,
Handle<String> source) {
@@ -52,7 +63,8 @@ ScriptData* CodeSerializer::Serialize(Isolate* isolate,
ScriptData* CodeSerializer::Serialize(Handle<HeapObject> obj) {
DisallowHeapAllocation no_gc;
- VisitRootPointer(Root::kHandleScope, Handle<Object>::cast(obj).location());
+ VisitRootPointer(Root::kHandleScope, nullptr,
+ Handle<Object>::cast(obj).location());
SerializeDeferredObjects();
Pad();
@@ -134,14 +146,16 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
// TODO(7110): Enable serializing of Asm modules once the AsmWasmData
// is context independent.
DCHECK(!sfi->IsApiFunction() && !sfi->HasAsmWasmData());
- // Do not serialize when a debugger is active.
- DCHECK(sfi->debug_info()->IsSmi());
+ // Clear debug info.
+ Object* debug_info = sfi->debug_info();
+ sfi->set_debug_info(Smi::kZero);
// Mark SFI to indicate whether the code is cached.
bool was_deserialized = sfi->deserialized();
sfi->set_deserialized(sfi->is_compiled());
SerializeGeneric(obj, how_to_code, where_to_point);
sfi->set_deserialized(was_deserialized);
+ sfi->set_debug_info(debug_info);
return;
}
diff --git a/deps/v8/src/snapshot/code-serializer.h b/deps/v8/src/snapshot/code-serializer.h
index edc1c2bf1d..8dd5131eb1 100644
--- a/deps/v8/src/snapshot/code-serializer.h
+++ b/deps/v8/src/snapshot/code-serializer.h
@@ -11,6 +11,38 @@
namespace v8 {
namespace internal {
+class ScriptData {
+ public:
+ ScriptData(const byte* data, int length);
+ ~ScriptData() {
+ if (owns_data_) DeleteArray(data_);
+ }
+
+ const byte* data() const { return data_; }
+ int length() const { return length_; }
+ bool rejected() const { return rejected_; }
+
+ void Reject() { rejected_ = true; }
+
+ void AcquireDataOwnership() {
+ DCHECK(!owns_data_);
+ owns_data_ = true;
+ }
+
+ void ReleaseDataOwnership() {
+ DCHECK(owns_data_);
+ owns_data_ = false;
+ }
+
+ private:
+ bool owns_data_ : 1;
+ bool rejected_ : 1;
+ const byte* data_;
+ int length_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScriptData);
+};
+
class CodeSerializer : public Serializer<> {
public:
static ScriptData* Serialize(Isolate* isolate,
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index 4b51e89e85..d1e200ef1e 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -60,8 +60,9 @@ Deserializer<AllocatorT>::~Deserializer() {
// This is called on the roots. It is the driver of the deserialization
// process. It is also called on the body of each function.
template <class AllocatorT>
-void Deserializer<AllocatorT>::VisitRootPointers(Root root, Object** start,
- Object** end) {
+void Deserializer<AllocatorT>::VisitRootPointers(Root root,
+ const char* description,
+ Object** start, Object** end) {
// Builtins and bytecode handlers are deserialized in a separate pass by the
// BuiltinDeserializer.
if (root == Root::kBuiltins || root == Root::kDispatchTable) return;
@@ -246,11 +247,12 @@ HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
// fields in the serializer.
BytecodeArray* bytecode_array = BytecodeArray::cast(obj);
bytecode_array->set_interrupt_budget(
- interpreter::Interpreter::kInterruptBudget);
+ interpreter::Interpreter::InterruptBudget());
bytecode_array->set_osr_loop_nesting_level(0);
}
// Check alignment.
- DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(), obj->RequiredAlignment()));
+ DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(),
+ HeapObject::RequiredAlignment(obj->map())));
return obj;
}
@@ -378,8 +380,11 @@ bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
CASE_STATEMENT(where, how, within, NEW_SPACE) \
CASE_BODY(where, how, within, NEW_SPACE) \
CASE_STATEMENT(where, how, within, OLD_SPACE) \
+ V8_FALLTHROUGH; \
CASE_STATEMENT(where, how, within, CODE_SPACE) \
+ V8_FALLTHROUGH; \
CASE_STATEMENT(where, how, within, MAP_SPACE) \
+ V8_FALLTHROUGH; \
CASE_STATEMENT(where, how, within, LO_SPACE) \
CASE_BODY(where, how, within, kAnyOldSpace)
@@ -480,9 +485,9 @@ bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
Address pc = code->entry() + pc_offset;
Address target = code->entry() + target_offset;
Assembler::deserialization_set_target_internal_reference_at(
- isolate, pc, target, data == kInternalReference
- ? RelocInfo::INTERNAL_REFERENCE
- : RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ pc, target,
+ data == kInternalReference ? RelocInfo::INTERNAL_REFERENCE
+ : RelocInfo::INTERNAL_REFERENCE_ENCODED);
break;
}
@@ -585,7 +590,7 @@ bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
int skip = source_.GetInt();
current = reinterpret_cast<Object**>(
reinterpret_cast<intptr_t>(current) + skip);
- // Fall through.
+ V8_FALLTHROUGH;
}
SIXTEEN_CASES(kRootArrayConstants)
@@ -604,7 +609,7 @@ bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
int skip = source_.GetInt();
current = reinterpret_cast<Object**>(
reinterpret_cast<Address>(current) + skip);
- // Fall through.
+ V8_FALLTHROUGH;
}
FOUR_CASES(kHotObject)
@@ -643,12 +648,17 @@ bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
break;
}
+#ifdef DEBUG
+#define UNUSED_CASE(byte_code) \
+ case byte_code: \
+ UNREACHABLE();
+ UNUSED_SERIALIZER_BYTE_CODES(UNUSED_CASE)
+#endif
+#undef UNUSED_CASE
+
#undef SIXTEEN_CASES
#undef FOUR_CASES
#undef SINGLE_CASE
-
- default:
- UNREACHABLE();
}
}
CHECK_EQ(limit, current);
@@ -746,7 +756,7 @@ Object** Deserializer<AllocatorT>::ReadDataCase(Isolate* isolate,
if (how == kFromCode) {
Address location_of_branch_data = reinterpret_cast<Address>(current);
Assembler::deserialization_set_special_target_at(
- isolate, location_of_branch_data,
+ location_of_branch_data,
Code::cast(HeapObject::FromAddress(current_object_address)),
reinterpret_cast<Address>(new_object));
location_of_branch_data += Assembler::kSpecialTargetSize;
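
Several hunks above replace "// Fall through." comments with the V8_FALLTHROUGH macro so that deliberate case fallthrough is visible to compilers that warn on implicit fallthrough. The sketch below shows how such a macro is commonly defined and used; the real definition lives in src/base/v8-fallthrough.h (included via utils.h later in this patch) and may differ from this approximation.

// Hedged sketch of an explicit-fallthrough annotation macro.
#include <cstdio>

#if defined(__clang__)
#define FALLTHROUGH [[clang::fallthrough]]
#elif defined(__GNUC__) && __GNUC__ >= 7
#define FALLTHROUGH __attribute__((fallthrough))
#else
#define FALLTHROUGH  // expands to nothing on other compilers
#endif

// Counts how many case bodies run for a given input; the deliberate
// fallthrough is annotated so -Wimplicit-fallthrough stays quiet.
int CasesVisited(int x) {
  int visited = 0;
  switch (x) {
    case 3:
      ++visited;
      FALLTHROUGH;
    case 2:
      ++visited;
      FALLTHROUGH;
    case 1:
      ++visited;
      break;
    default:
      break;
  }
  return visited;
}

int main() {
  std::printf("%d %d %d\n", CasesVisited(1), CasesVisited(2), CasesVisited(3));
  return 0;
}

Built with a fallthrough warning enabled, the annotated cases compile cleanly while an unannotated drop-through would be flagged, which is the point of the mechanical replacement in this patch.
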
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
index 5c9bda43ac..cd563e46a1 100644
--- a/deps/v8/src/snapshot/deserializer.h
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -96,7 +96,8 @@ class Deserializer : public SerializerDeserializer {
void Rehash();
private:
- void VisitRootPointers(Root root, Object** start, Object** end) override;
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override;
void Synchronize(VisitorSynchronization::SyncTag tag) override;
diff --git a/deps/v8/src/snapshot/object-deserializer.cc b/deps/v8/src/snapshot/object-deserializer.cc
index 3f92e7757f..bd8757e318 100644
--- a/deps/v8/src/snapshot/object-deserializer.cc
+++ b/deps/v8/src/snapshot/object-deserializer.cc
@@ -76,7 +76,7 @@ MaybeHandle<HeapObject> ObjectDeserializer::Deserialize(Isolate* isolate) {
{
DisallowHeapAllocation no_gc;
Object* root;
- VisitRootPointer(Root::kPartialSnapshotCache, &root);
+ VisitRootPointer(Root::kPartialSnapshotCache, nullptr, &root);
DeserializeDeferredObjects();
FlushICacheForNewCodeObjectsAndRecordEmbeddedObjects();
result = Handle<HeapObject>(HeapObject::cast(root));
@@ -93,8 +93,7 @@ void ObjectDeserializer::
for (Code* code : new_code_objects()) {
// Record all references to embedded objects in the new code object.
isolate()->heap()->RecordWritesIntoCode(code);
- Assembler::FlushICache(isolate(), code->instruction_start(),
- code->instruction_size());
+ Assembler::FlushICache(code->instruction_start(), code->instruction_size());
}
}
diff --git a/deps/v8/src/snapshot/partial-deserializer.cc b/deps/v8/src/snapshot/partial-deserializer.cc
index 41df5dbba7..6446f5e93f 100644
--- a/deps/v8/src/snapshot/partial-deserializer.cc
+++ b/deps/v8/src/snapshot/partial-deserializer.cc
@@ -42,7 +42,7 @@ MaybeHandle<Object> PartialDeserializer::Deserialize(
OldSpace* code_space = isolate->heap()->code_space();
Address start_address = code_space->top();
Object* root;
- VisitRootPointer(Root::kPartialSnapshotCache, &root);
+ VisitRootPointer(Root::kPartialSnapshotCache, nullptr, &root);
DeserializeDeferredObjects();
DeserializeEmbedderFields(embedder_fields_deserializer);
diff --git a/deps/v8/src/snapshot/partial-serializer.cc b/deps/v8/src/snapshot/partial-serializer.cc
index baac565a11..6661d9799f 100644
--- a/deps/v8/src/snapshot/partial-serializer.cc
+++ b/deps/v8/src/snapshot/partial-serializer.cc
@@ -42,7 +42,8 @@ void PartialSerializer::Serialize(Context** o, bool include_global_proxy) {
context_->set_math_random_index(Smi::kZero);
context_->set_math_random_cache(isolate()->heap()->undefined_value());
- VisitRootPointer(Root::kPartialSnapshotCache, reinterpret_cast<Object**>(o));
+ VisitRootPointer(Root::kPartialSnapshotCache, nullptr,
+ reinterpret_cast<Object**>(o));
SerializeDeferredObjects();
SerializeEmbedderFields();
Pad();
diff --git a/deps/v8/src/snapshot/serializer-common.cc b/deps/v8/src/snapshot/serializer-common.cc
index 71436fe8fd..d928b02ba1 100644
--- a/deps/v8/src/snapshot/serializer-common.cc
+++ b/deps/v8/src/snapshot/serializer-common.cc
@@ -111,7 +111,8 @@ void SerializerDeserializer::Iterate(Isolate* isolate, RootVisitor* visitor) {
if (cache->size() <= i) cache->push_back(Smi::kZero);
// During deserialization, the visitor populates the partial snapshot cache
// and eventually terminates the cache with undefined.
- visitor->VisitRootPointer(Root::kPartialSnapshotCache, &cache->at(i));
+ visitor->VisitRootPointer(Root::kPartialSnapshotCache, nullptr,
+ &cache->at(i));
if (cache->at(i)->IsUndefined(isolate)) break;
}
}
diff --git a/deps/v8/src/snapshot/serializer-common.h b/deps/v8/src/snapshot/serializer-common.h
index 7d3d66a08d..f68694d5b8 100644
--- a/deps/v8/src/snapshot/serializer-common.h
+++ b/deps/v8/src/snapshot/serializer-common.h
@@ -116,6 +116,40 @@ class SerializerDeserializer : public RootVisitor {
void RestoreExternalReferenceRedirectors(
const std::vector<CallHandlerInfo*>& call_handler_infos);
+#define UNUSED_SERIALIZER_BYTE_CODES(V) \
+ V(0x1d) \
+ V(0x1e) \
+ V(0x55) \
+ V(0x56) \
+ V(0x57) \
+ V(0x75) \
+ V(0x76) \
+ V(0x77) \
+ V(0x78) \
+ V(0x79) \
+ V(0x7a) \
+ V(0x7b) \
+ V(0x7c) \
+ V(0x7d) \
+ V(0x7e) \
+ V(0x7f) \
+ V(0xf0) \
+ V(0xf1) \
+ V(0xf2) \
+ V(0xf3) \
+ V(0xf4) \
+ V(0xf5) \
+ V(0xf6) \
+ V(0xf7) \
+ V(0xf8) \
+ V(0xf9) \
+ V(0xfa) \
+ V(0xfb) \
+ V(0xfc) \
+ V(0xfd) \
+ V(0xfe) \
+ V(0xff)
+
// ---------- byte code range 0x00..0x7f ----------
// Byte codes in this range represent Where, HowToCode and WhereToPoint.
// Where the pointed-to object can be found:
@@ -196,8 +230,6 @@ class SerializerDeserializer : public RootVisitor {
// Used for embedder-allocated backing stores for TypedArrays.
static const int kOffHeapBackingStore = 0x1c;
- // 0x1d, 0x1e unused.
-
// Used for embedder-provided serialization data for embedder fields.
static const int kEmbedderFieldsData = 0x1f;
@@ -217,8 +249,6 @@ class SerializerDeserializer : public RootVisitor {
static const int kHotObjectWithSkip = 0x58;
static const int kHotObjectMask = 0x07;
- // 0x55..0x57, 0x75..0x7f unused.
-
// ---------- byte code range 0x80..0xff ----------
// First 32 root array items.
static const int kNumberOfRootArrayConstants = 0x20;
@@ -241,8 +271,6 @@ class SerializerDeserializer : public RootVisitor {
static const int kFixedRepeat = 0xe0;
static const int kFixedRepeatStart = kFixedRepeat - 1;
- // 0xf0..0xff unused.
-
// ---------- special values ----------
static const int kAnyOldSpace = -1;
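
The UNUSED_SERIALIZER_BYTE_CODES list added above is an X-macro: the same list expands into explicit UNREACHABLE() cases in the deserializer's debug build (see the UNUSED_CASE hunk earlier in this diff), which lets the default arm be dropped while keeping the switch exhaustive. A small standalone sketch of the pattern follows; UNUSED_BYTE_CODES and HandleByteCode are illustrative names, not V8's.

// X-macro sketch: one list of reserved byte codes drives the debug-only cases.
#include <cassert>
#include <cstdio>

#define UNUSED_BYTE_CODES(V) \
  V(0x1d)                    \
  V(0x1e)                    \
  V(0xff)

int HandleByteCode(int byte_code) {
  switch (byte_code) {
    case 0x00:
      return 1;  // some real opcode
    case 0x01:
      return 2;  // another real opcode
#ifndef NDEBUG
// In debug builds every reserved code gets its own case that asserts,
// so stray codes are caught early instead of silently hitting a default.
#define UNUSED_CASE(code)                  \
  case code:                               \
    assert(false && "reserved byte code"); \
    return -1;
      UNUSED_BYTE_CODES(UNUSED_CASE)
#undef UNUSED_CASE
#endif
    default:
      return 0;
  }
}

int main() {
  std::printf("%d %d\n", HandleByteCode(0x00), HandleByteCode(0x42));
  return 0;
}
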
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index 87e4fe8fdc..b477227154 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -92,8 +92,9 @@ bool Serializer<AllocatorT>::MustBeDeferred(HeapObject* object) {
}
template <class AllocatorT>
-void Serializer<AllocatorT>::VisitRootPointers(Root root, Object** start,
- Object** end) {
+void Serializer<AllocatorT>::VisitRootPointers(Root root,
+ const char* description,
+ Object** start, Object** end) {
// Builtins and bytecode handlers are serialized in a separate pass by the
// BuiltinSerializer.
if (root == Root::kBuiltins || root == Root::kDispatchTable) return;
@@ -283,7 +284,7 @@ void Serializer<AllocatorT>::PutAttachedReference(SerializerReference reference,
template <class AllocatorT>
int Serializer<AllocatorT>::PutAlignmentPrefix(HeapObject* object) {
- AllocationAlignment alignment = object->RequiredAlignment();
+ AllocationAlignment alignment = HeapObject::RequiredAlignment(object->map());
if (alignment != kWordAligned) {
DCHECK(1 <= alignment && alignment <= 3);
byte prefix = (kAlignmentPrefix - 1) + alignment;
@@ -885,7 +886,7 @@ void Serializer<AllocatorT>::ObjectSerializer::OutputCode(int size) {
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
RelocInfo* rinfo = it.rinfo();
- rinfo->WipeOut(serializer_->isolate());
+ rinfo->WipeOut();
}
// We need to wipe out the header fields *after* wiping out the
// relocations, because some of these fields are needed for the latter.
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index 22dcb26c8c..586c8802c0 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -7,6 +7,7 @@
#include <map>
+#include "src/instruction-stream.h"
#include "src/isolate.h"
#include "src/log.h"
#include "src/objects.h"
@@ -116,6 +117,15 @@ class CodeAddressMap : public CodeEventLogger {
const char* name, int length) override {
address_to_name_map_.Insert(code->address(), name, length);
}
+ void LogRecordedBuffer(const InstructionStream* stream, const char* name,
+ int length) override {
+ address_to_name_map_.Insert(stream->bytes(), name, length);
+ }
+
+ void LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+ int length) override {
+ UNREACHABLE();
+ }
NameMap address_to_name_map_;
Isolate* isolate_;
@@ -162,7 +172,8 @@ class Serializer : public SerializerDeserializer {
virtual bool MustBeDeferred(HeapObject* object);
- void VisitRootPointers(Root root, Object** start, Object** end) override;
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override;
void PutRoot(int index, HeapObject* object, HowToCode how, WhereToPoint where,
int skip);
diff --git a/deps/v8/src/snapshot/snapshot-common.cc b/deps/v8/src/snapshot/snapshot-common.cc
index 2bf50cc748..534339b2e5 100644
--- a/deps/v8/src/snapshot/snapshot-common.cc
+++ b/deps/v8/src/snapshot/snapshot-common.cc
@@ -89,6 +89,10 @@ MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
// static
Code* Snapshot::DeserializeBuiltin(Isolate* isolate, int builtin_id) {
+ if (FLAG_trace_lazy_deserialization) {
+ PrintF("Lazy-deserializing builtin %s\n", Builtins::name(builtin_id));
+ }
+
base::ElapsedTimer timer;
if (FLAG_profile_deserialization) timer.Start();
@@ -116,9 +120,33 @@ Code* Snapshot::DeserializeBuiltin(Isolate* isolate, int builtin_id) {
}
// static
+void Snapshot::EnsureAllBuiltinsAreDeserialized(Isolate* isolate) {
+ if (!FLAG_lazy_deserialization) return;
+
+ Builtins* builtins = isolate->builtins();
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ if (!Builtins::IsLazy(i)) continue;
+
+ DCHECK_NE(Builtins::kDeserializeLazy, i);
+ Code* code = builtins->builtin(i);
+ if (code->builtin_index() == Builtins::kDeserializeLazy) {
+ code = Snapshot::DeserializeBuiltin(isolate, i);
+ }
+
+ DCHECK_EQ(i, code->builtin_index());
+ DCHECK_EQ(code, builtins->builtin(i));
+ }
+}
+
+// static
Code* Snapshot::DeserializeHandler(Isolate* isolate,
interpreter::Bytecode bytecode,
interpreter::OperandScale operand_scale) {
+ if (FLAG_trace_lazy_deserialization) {
+ PrintF("Lazy-deserializing handler %s\n",
+ interpreter::Bytecodes::ToString(bytecode, operand_scale).c_str());
+ }
+
base::ElapsedTimer timer;
if (FLAG_profile_deserialization) timer.Start();
@@ -312,16 +340,16 @@ void Snapshot::CheckVersion(const v8::StartupData* data) {
CHECK_LT(kVersionStringOffset + kVersionStringLength,
static_cast<uint32_t>(data->raw_size));
Version::GetString(Vector<char>(version, kVersionStringLength));
- if (memcmp(version, data->data + kVersionStringOffset,
- kVersionStringLength) != 0) {
- V8_Fatal(__FILE__, __LINE__,
- "Version mismatch between V8 binary and snapshot.\n"
- "# V8 binary version: %.*s\n"
- "# Snapshot version: %.*s\n"
- "# The snapshot consists of %d bytes and contains %d context(s).",
- kVersionStringLength, version, kVersionStringLength,
- data->data + kVersionStringOffset, data->raw_size,
- ExtractNumContexts(data));
+ if (strncmp(version, data->data + kVersionStringOffset,
+ kVersionStringLength) != 0) {
+ FATAL(
+ "Version mismatch between V8 binary and snapshot.\n"
+ "# V8 binary version: %.*s\n"
+ "# Snapshot version: %.*s\n"
+ "# The snapshot consists of %d bytes and contains %d context(s).",
+ kVersionStringLength, version, kVersionStringLength,
+ data->data + kVersionStringOffset, data->raw_size,
+ ExtractNumContexts(data));
}
}
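
EnsureAllBuiltinsAreDeserialized above walks the builtins table and force-materializes any entry that still points at the DeserializeLazy trampoline. The following sketch reproduces that shape with a plain function-pointer table; LazyStub, Materialize and friends are stand-ins, not V8 APIs.

// Sketch: force-materialize every table slot that still holds the lazy stub.
#include <array>
#include <cstdio>

using Fn = int (*)();

int LazyStub() { return -1; }  // stands in for the DeserializeLazy trampoline
int RealBuiltin0() { return 0; }
int RealBuiltin1() { return 1; }

std::array<Fn, 3> table = {RealBuiltin0, LazyStub, LazyStub};

// Stands in for Snapshot::DeserializeBuiltin: materialize slot i on demand.
Fn Materialize(int i) { return i == 1 ? RealBuiltin1 : RealBuiltin0; }

void EnsureAllMaterialized() {
  for (int i = 0; i < static_cast<int>(table.size()); i++) {
    if (table[i] == LazyStub) {
      table[i] = Materialize(i);  // replace the stub with the real entry
    }
  }
}

int main() {
  EnsureAllMaterialized();
  for (Fn f : table) std::printf("%d ", f());
  std::printf("\n");
  return 0;
}
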
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index 8f37e00c4a..23d6e3689f 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -97,6 +97,7 @@ class Snapshot : public AllStatic {
// runtime after the isolate (and the builtins table) has been fully
// initialized.
static Code* DeserializeBuiltin(Isolate* isolate, int builtin_id);
+ static void EnsureAllBuiltinsAreDeserialized(Isolate* isolate);
// Deserializes a single given handler code object. Intended to be called at
// runtime after the isolate has been fully initialized.
diff --git a/deps/v8/src/snapshot/startup-deserializer.cc b/deps/v8/src/snapshot/startup-deserializer.cc
index e6f853fe0e..d0369984b8 100644
--- a/deps/v8/src/snapshot/startup-deserializer.cc
+++ b/deps/v8/src/snapshot/startup-deserializer.cc
@@ -78,8 +78,7 @@ void StartupDeserializer::FlushICacheForNewIsolate() {
DCHECK(!deserializing_user_code());
// The entire isolate is newly deserialized. Simply flush all code pages.
for (Page* p : *isolate()->heap()->code_space()) {
- Assembler::FlushICache(isolate(), p->area_start(),
- p->area_end() - p->area_start());
+ Assembler::FlushICache(p->area_start(), p->area_end() - p->area_start());
}
}
diff --git a/deps/v8/src/snapshot/startup-serializer.cc b/deps/v8/src/snapshot/startup-serializer.cc
index 5ae6e33b87..b02d572595 100644
--- a/deps/v8/src/snapshot/startup-serializer.cc
+++ b/deps/v8/src/snapshot/startup-serializer.cc
@@ -5,6 +5,7 @@
#include "src/snapshot/startup-serializer.h"
#include "src/api.h"
+#include "src/global-handles.h"
#include "src/objects-inl.h"
#include "src/v8threads.h"
@@ -94,7 +95,7 @@ void StartupSerializer::SerializeWeakReferencesAndDeferred() {
// add entries to the partial snapshot cache of the startup snapshot. Add
// one entry with 'undefined' to terminate the partial snapshot cache.
Object* undefined = isolate()->heap()->undefined_value();
- VisitRootPointer(Root::kPartialSnapshotCache, &undefined);
+ VisitRootPointer(Root::kPartialSnapshotCache, nullptr, &undefined);
isolate()->heap()->IterateWeakRoots(this, VISIT_FOR_SERIALIZATION);
SerializeDeferredObjects();
Pad();
@@ -106,7 +107,7 @@ int StartupSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
// This object is not part of the partial snapshot cache yet. Add it to the
// startup snapshot so we can refer to it via partial snapshot index from
// the partial snapshot.
- VisitRootPointer(Root::kPartialSnapshotCache,
+ VisitRootPointer(Root::kPartialSnapshotCache, nullptr,
reinterpret_cast<Object**>(&heap_object));
}
return index;
@@ -133,8 +134,8 @@ void StartupSerializer::SerializeStrongReferences() {
isolate->heap()->IterateStrongRoots(this, VISIT_FOR_SERIALIZATION);
}
-void StartupSerializer::VisitRootPointers(Root root, Object** start,
- Object** end) {
+void StartupSerializer::VisitRootPointers(Root root, const char* description,
+ Object** start, Object** end) {
if (start == isolate()->heap()->roots_array_start()) {
// Serializing the root list needs special handling:
// - The first pass over the root list only serializes immortal immovables.
@@ -156,7 +157,7 @@ void StartupSerializer::VisitRootPointers(Root root, Object** start,
}
FlushSkip(skip);
} else {
- Serializer::VisitRootPointers(root, start, end);
+ Serializer::VisitRootPointers(root, description, start, end);
}
}
@@ -197,8 +198,9 @@ void SerializedHandleChecker::AddToSet(FixedArray* serialized) {
for (int i = 0; i < length; i++) serialized_.insert(serialized->get(i));
}
-void SerializedHandleChecker::VisitRootPointers(Root root, Object** start,
- Object** end) {
+void SerializedHandleChecker::VisitRootPointers(Root root,
+ const char* description,
+ Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
if (serialized_.find(*p) != serialized_.end()) continue;
PrintF("%s handle not serialized: ",
diff --git a/deps/v8/src/snapshot/startup-serializer.h b/deps/v8/src/snapshot/startup-serializer.h
index ae2a9f49df..ad440965b0 100644
--- a/deps/v8/src/snapshot/startup-serializer.h
+++ b/deps/v8/src/snapshot/startup-serializer.h
@@ -63,7 +63,8 @@ class StartupSerializer : public Serializer<> {
// The StartupSerializer has to serialize the root array, which is slightly
// different.
- void VisitRootPointers(Root root, Object** start, Object** end) override;
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override;
void SerializeObject(HeapObject* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) override;
void Synchronize(VisitorSynchronization::SyncTag tag) override;
@@ -86,7 +87,8 @@ class StartupSerializer : public Serializer<> {
class SerializedHandleChecker : public RootVisitor {
public:
SerializedHandleChecker(Isolate* isolate, std::vector<Context*>* contexts);
- virtual void VisitRootPointers(Root root, Object** start, Object** end);
+ virtual void VisitRootPointers(Root root, const char* description,
+ Object** start, Object** end);
bool CheckGlobalAndEternalHandles();
private:
diff --git a/deps/v8/src/string-case.h b/deps/v8/src/string-case.h
index 3fe3bc2b81..f57bae494f 100644
--- a/deps/v8/src/string-case.h
+++ b/deps/v8/src/string-case.h
@@ -14,4 +14,4 @@ int FastAsciiConvert(char* dst, const char* src, int length, bool* changed_out);
} // namespace internal
} // namespace v8
-#endif // V8_STRING_CASE_H__
+#endif // V8_STRING_CASE_H_
diff --git a/deps/v8/src/third_party/vtune/v8vtune.gyp b/deps/v8/src/third_party/vtune/v8vtune.gyp
deleted file mode 100644
index aaf521f310..0000000000
--- a/deps/v8/src/third_party/vtune/v8vtune.gyp
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-{
- 'variables': {
- 'v8_code': 1,
- },
- 'includes': ['../../../gypfiles/toolchain.gypi', '../../../gypfiles/features.gypi'],
- 'targets': [
- {
- 'target_name': 'v8_vtune',
- 'type': 'static_library',
- 'dependencies': [
- '../../v8.gyp:v8',
- ],
- 'sources': [
- 'ittnotify_config.h',
- 'ittnotify_types.h',
- 'jitprofiling.cc',
- 'jitprofiling.h',
- 'v8-vtune.h',
- 'vtune-jit.cc',
- 'vtune-jit.h',
- ],
- 'direct_dependent_settings': {
- 'defines': ['ENABLE_VTUNE_JIT_INTERFACE',],
- 'conditions': [
- ['OS != "win"', {
- 'libraries': ['-ldl',],
- }],
- ],
- },
- },
- ],
-}
diff --git a/deps/v8/src/tracing/trace-event.h b/deps/v8/src/tracing/trace-event.h
index bc73996be9..bd56f8a555 100644
--- a/deps/v8/src/tracing/trace-event.h
+++ b/deps/v8/src/tracing/trace-event.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef SRC_TRACING_TRACE_EVENT_H_
-#define SRC_TRACING_TRACE_EVENT_H_
+#ifndef V8_TRACING_TRACE_EVENT_H_
+#define V8_TRACING_TRACE_EVENT_H_
#include <stddef.h>
#include <memory>
@@ -686,4 +686,4 @@ class CallStatsScopedTracer {
} // namespace internal
} // namespace v8
-#endif // SRC_TRACING_TRACE_EVENT_H_
+#endif // V8_TRACING_TRACE_EVENT_H_
diff --git a/deps/v8/src/trap-handler/handler-outside.cc b/deps/v8/src/trap-handler/handler-outside.cc
index 2c9225d485..4dc7057782 100644
--- a/deps/v8/src/trap-handler/handler-outside.cc
+++ b/deps/v8/src/trap-handler/handler-outside.cc
@@ -34,10 +34,10 @@
namespace {
size_t gNextCodeObject = 0;
-#if defined(DEBUG)
-const bool kEnableDebug = true;
+#ifdef DEBUG
+constexpr bool kEnableDebug = true;
#else
-const bool kEnableDebug = false;
+constexpr bool kEnableDebug = false;
#endif
}
@@ -54,7 +54,7 @@ constexpr size_t HandlerDataSize(size_t num_protected_instructions) {
}
namespace {
-template <typename = std::enable_if<kEnableDebug>>
+#ifdef DEBUG
bool IsDisjoint(const CodeProtectionInfo* a, const CodeProtectionInfo* b) {
if (a == nullptr || b == nullptr) {
return true;
@@ -65,6 +65,7 @@ bool IsDisjoint(const CodeProtectionInfo* a, const CodeProtectionInfo* b) {
return a_base >= b_base + b->size || b_base >= a_base + a->size;
}
+#endif
// Verify that the code range does not overlap any that have already been
// registered.
@@ -181,6 +182,7 @@ int RegisterHandlerData(
new_size = int_max;
}
if (new_size == gNumCodeObjects) {
+ free(data);
return kInvalidIndex;
}
@@ -215,6 +217,7 @@ int RegisterHandlerData(
return static_cast<int>(i);
} else {
+ free(data);
return kInvalidIndex;
}
}
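
The two free(data) additions above close a leak in RegisterHandlerData: when the metadata table cannot grow or has no free slot, the freshly allocated block was previously dropped without being released. A minimal sketch of the same free-on-failure rule, with invented names, follows.

// Sketch: if the table cannot take ownership, the error path frees the block.
#include <cstdlib>

constexpr int kInvalidIndex = -1;
constexpr int kCapacity = 4;
void* g_slots[kCapacity] = {nullptr};

int RegisterData(void* data) {
  for (int i = 0; i < kCapacity; i++) {
    if (g_slots[i] == nullptr) {
      g_slots[i] = data;  // table takes ownership on success
      return i;
    }
  }
  free(data);             // on failure the caller never sees the pointer,
  return kInvalidIndex;   // so it must be released here to avoid a leak
}

int main() {
  int last = 0;
  for (int i = 0; i < kCapacity + 1; i++) {
    last = RegisterData(malloc(16));  // final call exercises the failure path
  }
  for (void* p : g_slots) free(p);    // release the registered blocks
  return last == kInvalidIndex ? 0 : 1;
}
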
diff --git a/deps/v8/src/trap-handler/trap-handler-internal.h b/deps/v8/src/trap-handler/trap-handler-internal.h
index 1476eb844b..a8cc371c36 100644
--- a/deps/v8/src/trap-handler/trap-handler-internal.h
+++ b/deps/v8/src/trap-handler/trap-handler-internal.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef TRAP_HANDLER_INTERNAL_H_
-#define TRAP_HANDLER_INTERNAL_H_
+#ifndef V8_TRAP_HANDLER_TRAP_HANDLER_INTERNAL_H_
+#define V8_TRAP_HANDLER_TRAP_HANDLER_INTERNAL_H_
// This file should not be included (even transitively) by files outside of
// src/trap-handler.
@@ -79,4 +79,4 @@ extern bool g_is_default_signal_handler_registered;
} // namespace internal
} // namespace v8
-#endif // TRAP_HANDLER_INTERNAL_H_
+#endif // V8_TRAP_HANDLER_TRAP_HANDLER_INTERNAL_H_
diff --git a/deps/v8/src/trap-handler/trap-handler.h b/deps/v8/src/trap-handler/trap-handler.h
index 1e02eeb34c..d410a19322 100644
--- a/deps/v8/src/trap-handler/trap-handler.h
+++ b/deps/v8/src/trap-handler/trap-handler.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_TRAP_HANDLER_H_
-#define V8_TRAP_HANDLER_H_
+#ifndef V8_TRAP_HANDLER_TRAP_HANDLER_H_
+#define V8_TRAP_HANDLER_TRAP_HANDLER_H_
#include <signal.h>
#include <stdint.h>
@@ -101,4 +101,4 @@ size_t GetRecoveredTrapCount();
} // namespace internal
} // namespace v8
-#endif // V8_TRAP_HANDLER_H_
+#endif // V8_TRAP_HANDLER_TRAP_HANDLER_H_
diff --git a/deps/v8/src/unicode-decoder.cc b/deps/v8/src/unicode-decoder.cc
index d2360b6c68..6074bae81d 100644
--- a/deps/v8/src/unicode-decoder.cc
+++ b/deps/v8/src/unicode-decoder.cc
@@ -10,74 +10,78 @@
namespace unibrow {
+uint16_t Utf8Iterator::operator*() {
+ if (V8_UNLIKELY(char_ > Utf16::kMaxNonSurrogateCharCode)) {
+ return trailing_ ? Utf16::TrailSurrogate(char_)
+ : Utf16::LeadSurrogate(char_);
+ }
+
+ DCHECK_EQ(trailing_, false);
+ return char_;
+}
+
+Utf8Iterator& Utf8Iterator::operator++() {
+ if (V8_UNLIKELY(this->Done())) {
+ char_ = Utf8::kBufferEmpty;
+ return *this;
+ }
+
+ if (V8_UNLIKELY(char_ > Utf16::kMaxNonSurrogateCharCode && !trailing_)) {
+ trailing_ = true;
+ return *this;
+ }
+
+ trailing_ = false;
+ offset_ = cursor_;
+
+ char_ =
+ Utf8::ValueOf(reinterpret_cast<const uint8_t*>(stream_.begin()) + cursor_,
+ stream_.length() - cursor_, &cursor_);
+ return *this;
+}
+
+Utf8Iterator Utf8Iterator::operator++(int) {
+ Utf8Iterator old(*this);
+ ++*this;
+ return old;
+}
+
+bool Utf8Iterator::Done() {
+ return offset_ == static_cast<size_t>(stream_.length());
+}
+
void Utf8DecoderBase::Reset(uint16_t* buffer, size_t buffer_length,
- const uint8_t* stream, size_t stream_length) {
- // Assume everything will fit in the buffer and stream won't be needed.
- last_byte_of_buffer_unused_ = false;
- unbuffered_start_ = nullptr;
- unbuffered_length_ = 0;
- bool writing_to_buffer = true;
- // Loop until stream is read, writing to buffer as long as buffer has space.
+ const v8::internal::Vector<const char>& stream) {
size_t utf16_length = 0;
- while (stream_length != 0) {
- size_t cursor = 0;
- uint32_t character = Utf8::ValueOf(stream, stream_length, &cursor);
- DCHECK(cursor > 0 && cursor <= stream_length);
- stream += cursor;
- stream_length -= cursor;
- bool is_two_characters = character > Utf16::kMaxNonSurrogateCharCode;
- utf16_length += is_two_characters ? 2 : 1;
- // Don't need to write to the buffer, but still need utf16_length.
- if (!writing_to_buffer) continue;
- // Write out the characters to the buffer.
- // Must check for equality with buffer_length as we've already updated it.
- if (utf16_length <= buffer_length) {
- if (is_two_characters) {
- *buffer++ = Utf16::LeadSurrogate(character);
- *buffer++ = Utf16::TrailSurrogate(character);
- } else {
- *buffer++ = character;
- }
- if (utf16_length == buffer_length) {
- // Just wrote last character of buffer
- writing_to_buffer = false;
- unbuffered_start_ = stream;
- unbuffered_length_ = stream_length;
- }
- continue;
- }
- // Have gone over buffer.
- // Last char of buffer is unused, set cursor back.
- DCHECK(is_two_characters);
- writing_to_buffer = false;
- last_byte_of_buffer_unused_ = true;
- unbuffered_start_ = stream - cursor;
- unbuffered_length_ = stream_length + cursor;
+
+ Utf8Iterator it = Utf8Iterator(stream);
+ // Loop until stream is read, writing to buffer as long as buffer has space.
+ while (utf16_length < buffer_length && !it.Done()) {
+ *buffer++ = *it;
+ ++it;
+ utf16_length++;
+ }
+ bytes_read_ = it.Offset();
+ trailing_ = it.Trailing();
+ chars_written_ = utf16_length;
+
+ // Now that writing to buffer is done, we just need to calculate utf16_length
+ while (!it.Done()) {
+ ++it;
+ utf16_length++;
}
utf16_length_ = utf16_length;
}
-
-void Utf8DecoderBase::WriteUtf16Slow(const uint8_t* stream,
- size_t stream_length, uint16_t* data,
- size_t data_length) {
- while (data_length != 0) {
- size_t cursor = 0;
- uint32_t character = Utf8::ValueOf(stream, stream_length, &cursor);
- // There's a total lack of bounds checking for stream
- // as it was already done in Reset.
- stream += cursor;
- DCHECK(stream_length >= cursor);
- stream_length -= cursor;
- if (character > unibrow::Utf16::kMaxNonSurrogateCharCode) {
- *data++ = Utf16::LeadSurrogate(character);
- *data++ = Utf16::TrailSurrogate(character);
- DCHECK_GT(data_length, 1);
- data_length -= 2;
- } else {
- *data++ = character;
- data_length -= 1;
- }
+void Utf8DecoderBase::WriteUtf16Slow(
+ uint16_t* data, size_t length,
+ const v8::internal::Vector<const char>& stream, size_t offset,
+ bool trailing) {
+ Utf8Iterator it = Utf8Iterator(stream, offset, trailing);
+ while (!it.Done()) {
+ DCHECK_GT(length--, 0);
+ *data++ = *it;
+ ++it;
}
}
diff --git a/deps/v8/src/unicode-decoder.h b/deps/v8/src/unicode-decoder.h
index 38a1837af3..ab69d0d390 100644
--- a/deps/v8/src/unicode-decoder.h
+++ b/deps/v8/src/unicode-decoder.h
@@ -6,30 +6,73 @@
#define V8_UNICODE_DECODER_H_
#include <sys/types.h>
+#include <algorithm>
#include "src/globals.h"
+#include "src/unicode.h"
#include "src/utils.h"
+#include "src/vector.h"
namespace unibrow {
+class Utf8Iterator {
+ public:
+ explicit Utf8Iterator(const v8::internal::Vector<const char>& stream)
+ : Utf8Iterator(stream, 0, false) {}
+ Utf8Iterator(const v8::internal::Vector<const char>& stream, size_t offset,
+ bool trailing)
+ : stream_(stream),
+ cursor_(offset),
+ offset_(0),
+ char_(0),
+ trailing_(false) {
+ DCHECK_LE(offset, stream.length());
+ // Read the first char, setting offset_ to offset in the process.
+ ++*this;
+
+ // This must be set after reading the first char, since the offset marks
+ // the start of the octet sequence that the trailing char is part of.
+ trailing_ = trailing;
+ if (trailing) {
+ DCHECK_GT(char_, Utf16::kMaxNonSurrogateCharCode);
+ }
+ }
+
+ uint16_t operator*();
+ Utf8Iterator& operator++();
+ Utf8Iterator operator++(int);
+ bool Done();
+ bool Trailing() { return trailing_; }
+ size_t Offset() { return offset_; }
+
+ private:
+ const v8::internal::Vector<const char>& stream_;
+ size_t cursor_;
+ size_t offset_;
+ uint32_t char_;
+ bool trailing_;
+};
+
class V8_EXPORT_PRIVATE Utf8DecoderBase {
public:
// Initialization done in subclass.
inline Utf8DecoderBase();
inline Utf8DecoderBase(uint16_t* buffer, size_t buffer_length,
- const uint8_t* stream, size_t stream_length);
+ const v8::internal::Vector<const char>& stream);
inline size_t Utf16Length() const { return utf16_length_; }
protected:
// This reads all characters and sets the utf16_length_.
// The first buffer_length utf16 chars are cached in the buffer.
- void Reset(uint16_t* buffer, size_t buffer_length, const uint8_t* stream,
- size_t stream_length);
- static void WriteUtf16Slow(const uint8_t* stream, size_t stream_length,
- uint16_t* data, size_t length);
- const uint8_t* unbuffered_start_;
- size_t unbuffered_length_;
+ void Reset(uint16_t* buffer, size_t buffer_length,
+ const v8::internal::Vector<const char>& vector);
+ static void WriteUtf16Slow(uint16_t* data, size_t length,
+ const v8::internal::Vector<const char>& stream,
+ size_t offset, bool trailing);
+
+ size_t bytes_read_;
+ size_t chars_written_;
size_t utf16_length_;
- bool last_byte_of_buffer_unused_;
+ bool trailing_;
private:
DISALLOW_COPY_AND_ASSIGN(Utf8DecoderBase);
@@ -39,69 +82,63 @@ template <size_t kBufferSize>
class Utf8Decoder : public Utf8DecoderBase {
public:
inline Utf8Decoder() {}
- inline Utf8Decoder(const char* stream, size_t length);
- inline void Reset(const char* stream, size_t length);
- inline size_t WriteUtf16(uint16_t* data, size_t length) const;
+ explicit inline Utf8Decoder(const v8::internal::Vector<const char>& stream);
+ inline void Reset(const v8::internal::Vector<const char>& stream);
+ inline size_t WriteUtf16(
+ uint16_t* data, size_t length,
+ const v8::internal::Vector<const char>& stream) const;
private:
uint16_t buffer_[kBufferSize];
};
Utf8DecoderBase::Utf8DecoderBase()
- : unbuffered_start_(nullptr),
- unbuffered_length_(0),
- utf16_length_(0),
- last_byte_of_buffer_unused_(false) {}
-
-Utf8DecoderBase::Utf8DecoderBase(uint16_t* buffer, size_t buffer_length,
- const uint8_t* stream, size_t stream_length) {
- Reset(buffer, buffer_length, stream, stream_length);
-}
+ : bytes_read_(0), chars_written_(0), utf16_length_(0), trailing_(false) {}
+Utf8DecoderBase::Utf8DecoderBase(
+ uint16_t* buffer, size_t buffer_length,
+ const v8::internal::Vector<const char>& stream) {
+ Reset(buffer, buffer_length, stream);
+}
template <size_t kBufferSize>
-Utf8Decoder<kBufferSize>::Utf8Decoder(const char* stream, size_t length)
- : Utf8DecoderBase(buffer_, kBufferSize,
- reinterpret_cast<const uint8_t*>(stream), length) {}
-
+Utf8Decoder<kBufferSize>::Utf8Decoder(
+ const v8::internal::Vector<const char>& stream)
+ : Utf8DecoderBase(buffer_, kBufferSize, stream) {}
template <size_t kBufferSize>
-void Utf8Decoder<kBufferSize>::Reset(const char* stream, size_t length) {
- Utf8DecoderBase::Reset(buffer_, kBufferSize,
- reinterpret_cast<const uint8_t*>(stream), length);
+void Utf8Decoder<kBufferSize>::Reset(
+ const v8::internal::Vector<const char>& stream) {
+ Utf8DecoderBase::Reset(buffer_, kBufferSize, stream);
}
-
template <size_t kBufferSize>
-size_t Utf8Decoder<kBufferSize>::WriteUtf16(uint16_t* data,
- size_t length) const {
- DCHECK_GT(length, 0);
- if (length > utf16_length_) length = utf16_length_;
+size_t Utf8Decoder<kBufferSize>::WriteUtf16(
+ uint16_t* data, size_t data_length,
+ const v8::internal::Vector<const char>& stream) const {
+ DCHECK_GT(data_length, 0);
+ data_length = std::min(data_length, utf16_length_);
+
// memcpy everything in buffer.
- size_t buffer_length =
- last_byte_of_buffer_unused_ ? kBufferSize - 1 : kBufferSize;
- size_t memcpy_length = length <= buffer_length ? length : buffer_length;
+ size_t memcpy_length = std::min(data_length, chars_written_);
v8::internal::MemCopy(data, buffer_, memcpy_length * sizeof(uint16_t));
- if (length <= buffer_length) return length;
- DCHECK_NOT_NULL(unbuffered_start_);
+
+ if (data_length <= chars_written_) return data_length;
+
// Copy the rest the slow way.
- WriteUtf16Slow(unbuffered_start_, unbuffered_length_, data + buffer_length,
- length - buffer_length);
- return length;
+ WriteUtf16Slow(data + chars_written_, data_length - chars_written_, stream,
+ bytes_read_, trailing_);
+ return data_length;
}
class Latin1 {
public:
static const unsigned kMaxChar = 0xff;
- // Returns 0 if character does not convert to single latin-1 character
- // or if the character doesn't not convert back to latin-1 via inverse
- // operation (upper to lower, etc).
- static inline uint16_t ConvertNonLatin1ToLatin1(uint16_t);
+ // Convert the character to Latin-1 case equivalent if possible.
+ static inline uint16_t TryConvertToLatin1(uint16_t);
};
-
-uint16_t Latin1::ConvertNonLatin1ToLatin1(uint16_t c) {
- DCHECK_GT(c, Latin1::kMaxChar);
+uint16_t Latin1::TryConvertToLatin1(uint16_t c) {
switch (c) {
     // These are equivalent characters in Unicode.
case 0x39c:
@@ -112,7 +149,7 @@ uint16_t Latin1::ConvertNonLatin1ToLatin1(uint16_t c) {
case 0x178:
return 0xff;
}
- return 0;
+ return c;
}
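
The new Utf8Iterator yields UTF-16 code units one at a time, emitting a lead surrogate first and the trail surrogate on the next increment whenever the decoded code point lies outside the BMP. The sketch below shows just that surrogate-splitting step using the standard UTF-16 formulas; the helper names are illustrative rather than V8's.

// Sketch of the surrogate-pair split performed by Utf8Iterator::operator*().
#include <cstdint>
#include <cstdio>
#include <vector>

constexpr uint32_t kMaxNonSurrogateCharCode = 0xFFFF;

uint16_t LeadSurrogate(uint32_t cp) {
  return static_cast<uint16_t>(0xD800 + (((cp - 0x10000) >> 10) & 0x3FF));
}

uint16_t TrailSurrogate(uint32_t cp) {
  return static_cast<uint16_t>(0xDC00 + ((cp - 0x10000) & 0x3FF));
}

// Emits one or two UTF-16 code units per code point, mirroring how the
// iterator yields the lead surrogate first and the trail surrogate on the
// next increment when its trailing_ flag is set.
void AppendUtf16(uint32_t cp, std::vector<uint16_t>* out) {
  if (cp > kMaxNonSurrogateCharCode) {
    out->push_back(LeadSurrogate(cp));
    out->push_back(TrailSurrogate(cp));
  } else {
    out->push_back(static_cast<uint16_t>(cp));
  }
}

int main() {
  std::vector<uint16_t> units;
  AppendUtf16(0x41, &units);     // 'A'   -> single unit 0x0041
  AppendUtf16(0x1F600, &units);  // emoji -> 0xD83D 0xDE00 surrogate pair
  for (uint16_t u : units) std::printf("%04X ", u);
  std::printf("\n");
  return 0;
}
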
diff --git a/deps/v8/src/unicode.h b/deps/v8/src/unicode.h
index c6ce9a8eb2..75f53e22d1 100644
--- a/deps/v8/src/unicode.h
+++ b/deps/v8/src/unicode.h
@@ -204,6 +204,10 @@ V8_INLINE bool IsLineTerminator(uchar c) {
return c == 0x000A || c == 0x000D || c == 0x2028 || c == 0x2029;
}
+V8_INLINE bool IsStringLiteralLineTerminator(uchar c) {
+ return c == 0x000A || c == 0x000D;
+}
+
#ifndef V8_INTL_SUPPORT
struct ToLowercase {
static const int kMaxWidth = 3;
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index 5b5d95ce9a..f3e2718fe9 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -18,6 +18,7 @@
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
+#include "src/base/v8-fallthrough.h"
#include "src/globals.h"
#include "src/vector.h"
#include "src/zone/zone.h"
@@ -79,9 +80,15 @@ inline int WhichPowerOf2(T x) {
#undef CHECK_BIGGER
switch (x) {
default: UNREACHABLE();
- case 8: bits++; // Fall through.
- case 4: bits++; // Fall through.
- case 2: bits++; // Fall through.
+ case 8:
+ bits++;
+ V8_FALLTHROUGH;
+ case 4:
+ bits++;
+ V8_FALLTHROUGH;
+ case 2:
+ bits++;
+ V8_FALLTHROUGH;
case 1: break;
}
DCHECK_EQ(T{1} << bits, original_x);
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index a6d97e8ff1..ab4918efec 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -81,7 +81,6 @@ void V8::InitializeOncePerProcessImpl() {
sampler::Sampler::SetUp();
CpuFeatures::Probe(false);
ElementsAccessor::InitializeOncePerProcess();
- SetUpJSCallerSavedCodeData();
ExternalReference::SetUp();
Bootstrapper::InitializeOncePerProcess();
}
diff --git a/deps/v8/src/v8.gyp b/deps/v8/src/v8.gyp
deleted file mode 100644
index 1f0ad0a467..0000000000
--- a/deps/v8/src/v8.gyp
+++ /dev/null
@@ -1,2630 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-{
- 'variables': {
- 'v8_code': 1,
- 'v8_random_seed%': 314159265,
- 'v8_vector_stores%': 0,
- 'embed_script%': "",
- 'warmup_script%': "",
- 'v8_extra_library_files%': [],
- 'v8_experimental_extra_library_files%': [],
- 'mksnapshot_exec': '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)',
- 'v8_os_page_size%': 0,
- },
- 'includes': ['../gypfiles/toolchain.gypi', '../gypfiles/features.gypi', 'inspector/inspector.gypi'],
- 'targets': [
- {
- 'target_name': 'v8',
- 'dependencies_traverse': 1,
- 'dependencies': ['v8_maybe_snapshot', 'v8_dump_build_config#target'],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }],
- ['component=="shared_library"', {
- 'type': '<(component)',
- 'sources': [
- # Note: on non-Windows we still build this file so that gyp
- # has some sources to link into the component.
- 'v8dll-main.cc',
- ],
- 'include_dirs': [
- '..',
- ],
- 'defines': [
- 'BUILDING_V8_SHARED',
- ],
- 'direct_dependent_settings': {
- 'defines': [
- 'USING_V8_SHARED',
- ],
- },
- 'conditions': [
- ['OS=="mac"', {
- 'xcode_settings': {
- 'OTHER_LDFLAGS': ['-dynamiclib', '-all_load']
- },
- }],
- ['soname_version!=""', {
- 'product_extension': 'so.<(soname_version)',
- }],
- ],
- },
- {
- 'type': 'none',
- }],
- ],
- 'direct_dependent_settings': {
- 'include_dirs': [
- '../include',
- ],
- },
- },
- {
- # This rule delegates to either v8_snapshot, v8_nosnapshot, or
- # v8_external_snapshot, depending on the current variables.
- # The intention is to make the 'calling' rules a bit simpler.
- 'target_name': 'v8_maybe_snapshot',
- 'type': 'none',
- 'conditions': [
- ['v8_use_snapshot!="true"', {
- # The dependency on v8_base should come from a transitive
- # dependency however the Android toolchain requires libv8_base.a
- # to appear before libv8_snapshot.a so it's listed explicitly.
- 'dependencies': ['v8_base', 'v8_init', 'v8_nosnapshot'],
- }],
- ['v8_use_snapshot=="true" and v8_use_external_startup_data==0', {
- # The dependency on v8_base should come from a transitive
- # dependency however the Android toolchain requires libv8_base.a
- # to appear before libv8_snapshot.a so it's listed explicitly.
- 'dependencies': ['v8_base', 'v8_snapshot'],
- }],
- ['v8_use_snapshot=="true" and v8_use_external_startup_data==1 and want_separate_host_toolset==0', {
- 'dependencies': ['v8_base', 'v8_external_snapshot'],
- 'inputs': [ '<(PRODUCT_DIR)/snapshot_blob.bin', ],
- }],
- ['v8_use_snapshot=="true" and v8_use_external_startup_data==1 and want_separate_host_toolset==1', {
- 'dependencies': ['v8_base', 'v8_external_snapshot'],
- 'target_conditions': [
- ['_toolset=="host"', {
- 'inputs': [
- '<(PRODUCT_DIR)/snapshot_blob_host.bin',
- ],
- }, {
- 'inputs': [
- '<(PRODUCT_DIR)/snapshot_blob.bin',
- ],
- }],
- ],
- }],
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }],
- ]
- },
- {
- 'target_name': 'v8_init',
- 'type': 'static_library',
- 'dependencies': [
- 'v8_initializers',
- ],
- 'variables': {
- 'optimize': 'max',
- },
- 'include_dirs+': [
- '..',
- '../include',
- ],
- 'sources': [ ### gcmole(all) ###
- 'setup-isolate-full.cc',
- ],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }],
- ],
- },
- {
- 'target_name': 'v8_initializers',
- 'type': 'static_library',
- 'dependencies': [
- 'v8_base',
- ],
- 'variables': {
- 'optimize': 'max',
- },
- 'include_dirs+': [
- '..',
- '../include',
- ],
- 'sources': [ ### gcmole(all) ###
- 'builtins/builtins-arguments-gen.cc',
- 'builtins/builtins-arguments-gen.h',
- 'builtins/builtins-array-gen.cc',
- 'builtins/builtins-async-function-gen.cc',
- 'builtins/builtins-async-gen.cc',
- 'builtins/builtins-async-gen.h',
- 'builtins/builtins-async-generator-gen.cc',
- 'builtins/builtins-async-iterator-gen.cc',
- 'builtins/builtins-boolean-gen.cc',
- 'builtins/builtins-call-gen.cc',
- 'builtins/builtins-call-gen.h',
- 'builtins/builtins-collections-gen.cc',
- 'builtins/builtins-console-gen.cc',
- 'builtins/builtins-constructor-gen.cc',
- 'builtins/builtins-constructor-gen.h',
- 'builtins/builtins-constructor.h',
- 'builtins/builtins-conversion-gen.cc',
- 'builtins/builtins-date-gen.cc',
- 'builtins/builtins-debug-gen.cc',
- 'builtins/builtins-function-gen.cc',
- 'builtins/builtins-generator-gen.cc',
- 'builtins/builtins-global-gen.cc',
- 'builtins/builtins-handler-gen.cc',
- 'builtins/builtins-ic-gen.cc',
- 'builtins/builtins-internal-gen.cc',
- 'builtins/builtins-interpreter-gen.cc',
- 'builtins/builtins-intl-gen.cc',
- 'builtins/builtins-iterator-gen.h',
- 'builtins/builtins-iterator-gen.cc',
- 'builtins/builtins-math-gen.cc',
- 'builtins/builtins-math-gen.h',
- 'builtins/builtins-number-gen.cc',
- 'builtins/builtins-object-gen.cc',
- 'builtins/builtins-promise-gen.cc',
- 'builtins/builtins-promise-gen.h',
- 'builtins/builtins-proxy-gen.cc',
- 'builtins/builtins-proxy-gen.h',
- 'builtins/builtins-reflect-gen.cc',
- 'builtins/builtins-regexp-gen.cc',
- 'builtins/builtins-regexp-gen.h',
- 'builtins/builtins-sharedarraybuffer-gen.cc',
- 'builtins/builtins-string-gen.cc',
- 'builtins/builtins-string-gen.h',
- 'builtins/builtins-symbol-gen.cc',
- 'builtins/builtins-typedarray-gen.cc',
- 'builtins/builtins-utils-gen.h',
- 'builtins/builtins-wasm-gen.cc',
- 'builtins/setup-builtins-internal.cc',
- 'heap/setup-heap-internal.cc',
- 'ic/accessor-assembler.cc',
- 'ic/accessor-assembler.h',
- 'ic/binary-op-assembler.cc',
- 'ic/binary-op-assembler.h',
- 'ic/keyed-store-generic.cc',
- 'ic/keyed-store-generic.h',
- 'interpreter/interpreter-assembler.cc',
- 'interpreter/interpreter-assembler.h',
- 'interpreter/interpreter-generator.cc',
- 'interpreter/interpreter-generator.h',
- 'interpreter/interpreter-intrinsics-generator.cc',
- 'interpreter/interpreter-intrinsics-generator.h',
- 'interpreter/setup-interpreter-internal.cc',
- 'interpreter/setup-interpreter.h',
- ],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }],
- ['v8_target_arch=="ia32"', {
- 'sources': [ ### gcmole(arch:ia32) ###
- 'builtins/ia32/builtins-ia32.cc',
- ],
- }],
- ['v8_target_arch=="x64"', {
- 'sources': [ ### gcmole(arch:x64) ###
- 'builtins/x64/builtins-x64.cc',
- ],
- }],
- ['v8_target_arch=="arm"', {
- 'sources': [ ### gcmole(arch:arm) ###
- 'builtins/arm/builtins-arm.cc',
- ],
- }],
- ['v8_target_arch=="arm64"', {
- 'sources': [ ### gcmole(arch:arm64) ###
- 'builtins/arm64/builtins-arm64.cc',
- ],
- }],
- ['v8_target_arch=="mips" or v8_target_arch=="mipsel"', {
- 'sources': [ ### gcmole(arch:mipsel) ###
- 'builtins/mips/builtins-mips.cc',
- ],
- }],
- ['v8_target_arch=="mips64" or v8_target_arch=="mips64el"', {
- 'sources': [ ### gcmole(arch:mips64el) ###
- 'builtins/mips64/builtins-mips64.cc',
- ],
- }],
- ['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
- 'sources': [ ### gcmole(arch:ppc) ###
- 'builtins/ppc/builtins-ppc.cc',
- ],
- }],
- ['v8_target_arch=="s390" or v8_target_arch=="s390x"', {
- 'sources': [ ### gcmole(arch:s390) ###
- 'builtins/s390/builtins-s390.cc',
- ],
- }],
- ['v8_enable_i18n_support==0', {
- 'sources!': [
- 'builtins/builtins-intl-gen.cc',
- ],
- }],
- ],
- },
- {
- 'target_name': 'v8_snapshot',
- 'type': 'static_library',
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- 'dependencies': [
- 'mksnapshot#host',
- 'js2c#host',
- ],
- }, {
- 'toolsets': ['target'],
- 'dependencies': [
- 'mksnapshot',
- 'js2c',
- ],
- }],
- ['component=="shared_library"', {
- 'defines': [
- 'BUILDING_V8_SHARED',
- ],
- 'direct_dependent_settings': {
- 'defines': [
- 'USING_V8_SHARED',
- ],
- },
- }],
- ],
- 'dependencies': [
- 'v8_base',
- ],
- 'include_dirs+': [
- '..',
- '<(DEPTH)',
- ],
- 'sources': [
- '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
- '<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
- '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
- '<(INTERMEDIATE_DIR)/snapshot.cc',
- 'setup-isolate-deserialize.cc',
- ],
- 'actions': [
- {
- 'action_name': 'run_mksnapshot',
- 'inputs': [
- '<(mksnapshot_exec)',
- ],
- 'conditions': [
- ['embed_script!=""', {
- 'inputs': [
- '<(embed_script)',
- ],
- }],
- ['warmup_script!=""', {
- 'inputs': [
- '<(warmup_script)',
- ],
- }],
- ],
- 'outputs': [
- '<(INTERMEDIATE_DIR)/snapshot.cc',
- ],
- 'variables': {
- 'mksnapshot_flags': [],
- 'conditions': [
- ['v8_random_seed!=0', {
- 'mksnapshot_flags': ['--random-seed', '<(v8_random_seed)'],
- }],
- ['v8_vector_stores!=0', {
- 'mksnapshot_flags': ['--vector-stores'],
- }],
- ],
- },
- 'action': [
- '<(mksnapshot_exec)',
- '<@(mksnapshot_flags)',
- '--startup_src', '<@(INTERMEDIATE_DIR)/snapshot.cc',
- '<(embed_script)',
- '<(warmup_script)',
- ],
- },
- ],
- },
- {
- 'target_name': 'v8_nosnapshot',
- 'type': 'static_library',
- 'dependencies': [
- 'v8_base',
- ],
- 'include_dirs+': [
- '..',
- '<(DEPTH)',
- ],
- 'sources': [
- '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
- '<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
- '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
- 'snapshot/snapshot-empty.cc',
- ],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- 'dependencies': ['js2c#host'],
- }, {
- 'toolsets': ['target'],
- 'dependencies': ['js2c'],
- }],
- ['component=="shared_library"', {
- 'defines': [
- 'BUILDING_V8_SHARED',
- ],
- }],
- ]
- },
- {
- 'target_name': 'v8_external_snapshot',
- 'type': 'static_library',
- 'conditions': [
- [ 'v8_use_external_startup_data==1', {
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- 'dependencies': [
- 'mksnapshot#host',
- 'js2c#host',
- 'natives_blob',
- ]}, {
- 'toolsets': ['target'],
- 'dependencies': [
- 'mksnapshot',
- 'js2c',
- 'natives_blob',
- ],
- }],
- ['component=="shared_library"', {
- 'defines': [
- 'BUILDING_V8_SHARED',
- ],
- 'direct_dependent_settings': {
- 'defines': [
- 'USING_V8_SHARED',
- ],
- },
- }],
- ],
- 'dependencies': [
- 'v8_base',
- ],
- 'include_dirs+': [
- '..',
- '<(DEPTH)',
- ],
- 'sources': [
- 'setup-isolate-deserialize.cc',
- 'snapshot/natives-external.cc',
- 'snapshot/snapshot-external.cc',
- ],
- 'actions': [
- {
- 'action_name': 'run_mksnapshot (external)',
- 'inputs': [
- '<(mksnapshot_exec)',
- ],
- 'variables': {
- 'mksnapshot_flags': [],
- 'conditions': [
- ['v8_random_seed!=0', {
- 'mksnapshot_flags': ['--random-seed', '<(v8_random_seed)'],
- }],
- ['v8_vector_stores!=0', {
- 'mksnapshot_flags': ['--vector-stores'],
- }],
- ['v8_os_page_size!=0', {
- 'mksnapshot_flags': ['--v8_os_page_size', '<(v8_os_page_size)'],
- }],
- ],
- },
- 'conditions': [
- ['embed_script!=""', {
- 'inputs': [
- '<(embed_script)',
- ],
- }],
- ['warmup_script!=""', {
- 'inputs': [
- '<(warmup_script)',
- ],
- }],
- ['want_separate_host_toolset==1', {
- 'target_conditions': [
- ['_toolset=="host"', {
- 'outputs': [
- '<(PRODUCT_DIR)/snapshot_blob_host.bin',
- ],
- 'action': [
- '<(mksnapshot_exec)',
- '<@(mksnapshot_flags)',
- '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob_host.bin',
- '<(embed_script)',
- '<(warmup_script)',
- ],
- }, {
- 'outputs': [
- '<(PRODUCT_DIR)/snapshot_blob.bin',
- ],
- 'action': [
- '<(mksnapshot_exec)',
- '<@(mksnapshot_flags)',
- '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob.bin',
- '<(embed_script)',
- '<(warmup_script)',
- ],
- }],
- ],
- }, {
- 'outputs': [
- '<(PRODUCT_DIR)/snapshot_blob.bin',
- ],
- 'action': [
- '<(mksnapshot_exec)',
- '<@(mksnapshot_flags)',
- '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob.bin',
- '<(embed_script)',
- '<(warmup_script)',
- ],
- }],
- ],
- },
- ],
- }],
- ],
- },
- {
- 'target_name': 'v8_base',
- 'type': 'static_library',
- 'dependencies': [
- 'v8_libbase',
- 'v8_libsampler',
- 'inspector/inspector.gyp:protocol_generated_sources#target',
- 'inspector/inspector.gyp:inspector_injected_script#target',
- ],
- 'objs': ['foo.o'],
- 'variables': {
- 'optimize': 'max',
- },
- 'include_dirs+': [
- '..',
- '<(DEPTH)',
- '<(SHARED_INTERMEDIATE_DIR)'
- ],
- 'sources': [ ### gcmole(all) ###
- '<@(inspector_all_sources)',
- '../include/v8-debug.h',
- '../include/v8-platform.h',
- '../include/v8-profiler.h',
- '../include/v8-testing.h',
- '../include/v8-util.h',
- '../include/v8-value-serializer-version.h',
- '../include/v8-version-string.h',
- '../include/v8-version.h',
- '../include/v8.h',
- '../include/v8config.h',
- 'accessors.cc',
- 'accessors.h',
- 'address-map.cc',
- 'address-map.h',
- 'allocation.cc',
- 'allocation.h',
- 'allocation-site-scopes.h',
- 'api.cc',
- 'api.h',
- 'api-arguments-inl.h',
- 'api-arguments.cc',
- 'api-arguments.h',
- 'api-natives.cc',
- 'api-natives.h',
- 'arguments.cc',
- 'arguments.h',
- 'asmjs/asm-js.cc',
- 'asmjs/asm-js.h',
- 'asmjs/asm-names.h',
- 'asmjs/asm-parser.cc',
- 'asmjs/asm-parser.h',
- 'asmjs/asm-scanner.cc',
- 'asmjs/asm-scanner.h',
- 'asmjs/asm-types.cc',
- 'asmjs/asm-types.h',
- 'asmjs/switch-logic.h',
- 'asmjs/switch-logic.cc',
- 'assembler.cc',
- 'assembler.h',
- 'assembler-inl.h',
- 'assert-scope.h',
- 'assert-scope.cc',
- 'ast/ast-function-literal-id-reindexer.cc',
- 'ast/ast-function-literal-id-reindexer.h',
- 'ast/ast-numbering.cc',
- 'ast/ast-numbering.h',
- 'ast/ast-source-ranges.h',
- 'ast/ast-traversal-visitor.h',
- 'ast/ast-value-factory.cc',
- 'ast/ast-value-factory.h',
- 'ast/ast.cc',
- 'ast/ast.h',
- 'ast/compile-time-value.cc',
- 'ast/compile-time-value.h',
- 'ast/context-slot-cache.cc',
- 'ast/context-slot-cache.h',
- 'ast/modules.cc',
- 'ast/modules.h',
- 'ast/prettyprinter.cc',
- 'ast/prettyprinter.h',
- 'ast/scopes.cc',
- 'ast/scopes.h',
- 'ast/variables.cc',
- 'ast/variables.h',
- 'bailout-reason.cc',
- 'bailout-reason.h',
- 'basic-block-profiler.cc',
- 'basic-block-profiler.h',
- 'bignum-dtoa.cc',
- 'bignum-dtoa.h',
- 'bignum.cc',
- 'bignum.h',
- 'bit-vector.cc',
- 'bit-vector.h',
- 'bootstrapper.cc',
- 'bootstrapper.h',
- 'boxed-float.h',
- 'builtins/builtins-api.cc',
- 'builtins/builtins-arraybuffer.cc',
- 'builtins/builtins-array.cc',
- 'builtins/builtins-bigint.cc',
- 'builtins/builtins-boolean.cc',
- 'builtins/builtins-call.cc',
- 'builtins/builtins-callsite.cc',
- 'builtins/builtins-collections.cc',
- 'builtins/builtins-console.cc',
- 'builtins/builtins-constructor.h',
- 'builtins/builtins-dataview.cc',
- 'builtins/builtins-date.cc',
- 'builtins/builtins-definitions.h',
- 'builtins/builtins-descriptors.h',
- 'builtins/builtins-error.cc',
- 'builtins/builtins-function.cc',
- 'builtins/builtins-global.cc',
- 'builtins/builtins-internal.cc',
- 'builtins/builtins-interpreter.cc',
- 'builtins/builtins-json.cc',
- 'builtins/builtins-math.cc',
- 'builtins/builtins-number.cc',
- 'builtins/builtins-object.cc',
- 'builtins/builtins-promise.cc',
- 'builtins/builtins-reflect.cc',
- 'builtins/builtins-regexp.cc',
- 'builtins/builtins-sharedarraybuffer.cc',
- 'builtins/builtins-string.cc',
- 'builtins/builtins-intl.cc',
- 'builtins/builtins-intl.h',
- 'builtins/builtins-symbol.cc',
- 'builtins/builtins-typedarray.cc',
- 'builtins/builtins-utils.h',
- 'builtins/builtins.cc',
- 'builtins/builtins.h',
- 'cached-powers.cc',
- 'cached-powers.h',
- 'callable.h',
- 'cancelable-task.cc',
- 'cancelable-task.h',
- 'char-predicates.cc',
- 'char-predicates-inl.h',
- 'char-predicates.h',
- 'checks.h',
- 'code-events.h',
- 'code-factory.cc',
- 'code-factory.h',
- 'code-stub-assembler.cc',
- 'code-stub-assembler.h',
- 'code-stubs.cc',
- 'code-stubs.h',
- 'code-stubs-utils.h',
- 'codegen.cc',
- 'codegen.h',
- 'collector.h',
- 'compilation-cache.cc',
- 'compilation-cache.h',
- 'compilation-dependencies.cc',
- 'compilation-dependencies.h',
- 'compilation-info.cc',
- 'compilation-info.h',
- 'compilation-statistics.cc',
- 'compilation-statistics.h',
- 'compiler/access-builder.cc',
- 'compiler/access-builder.h',
- 'compiler/access-info.cc',
- 'compiler/access-info.h',
- 'compiler/all-nodes.cc',
- 'compiler/all-nodes.h',
- 'compiler/allocation-builder.h',
- 'compiler/basic-block-instrumentor.cc',
- 'compiler/basic-block-instrumentor.h',
- 'compiler/branch-elimination.cc',
- 'compiler/branch-elimination.h',
- 'compiler/bytecode-analysis.cc',
- 'compiler/bytecode-analysis.h',
- 'compiler/bytecode-graph-builder.cc',
- 'compiler/bytecode-graph-builder.h',
- 'compiler/bytecode-liveness-map.cc',
- 'compiler/bytecode-liveness-map.h',
- 'compiler/c-linkage.cc',
- 'compiler/checkpoint-elimination.cc',
- 'compiler/checkpoint-elimination.h',
- 'compiler/code-generator-impl.h',
- 'compiler/code-generator.cc',
- 'compiler/code-generator.h',
- 'compiler/code-assembler.cc',
- 'compiler/code-assembler.h',
- 'compiler/common-node-cache.cc',
- 'compiler/common-node-cache.h',
- 'compiler/common-operator-reducer.cc',
- 'compiler/common-operator-reducer.h',
- 'compiler/common-operator.cc',
- 'compiler/common-operator.h',
- 'compiler/control-equivalence.cc',
- 'compiler/control-equivalence.h',
- 'compiler/control-flow-optimizer.cc',
- 'compiler/control-flow-optimizer.h',
- 'compiler/dead-code-elimination.cc',
- 'compiler/dead-code-elimination.h',
- 'compiler/diamond.h',
- 'compiler/effect-control-linearizer.cc',
- 'compiler/effect-control-linearizer.h',
- 'compiler/escape-analysis.cc',
- 'compiler/escape-analysis.h',
- 'compiler/escape-analysis-reducer.cc',
- 'compiler/escape-analysis-reducer.h',
- 'compiler/frame.cc',
- 'compiler/frame.h',
- 'compiler/frame-elider.cc',
- 'compiler/frame-elider.h',
- 'compiler/frame-states.cc',
- 'compiler/frame-states.h',
- 'compiler/gap-resolver.cc',
- 'compiler/gap-resolver.h',
- 'compiler/graph-assembler.cc',
- 'compiler/graph-assembler.h',
- 'compiler/graph-reducer.cc',
- 'compiler/graph-reducer.h',
- 'compiler/graph-trimmer.cc',
- 'compiler/graph-trimmer.h',
- 'compiler/graph-visualizer.cc',
- 'compiler/graph-visualizer.h',
- 'compiler/graph.cc',
- 'compiler/graph.h',
- 'compiler/instruction-codes.h',
- 'compiler/instruction-selector-impl.h',
- 'compiler/instruction-selector.cc',
- 'compiler/instruction-selector.h',
- 'compiler/instruction-scheduler.cc',
- 'compiler/instruction-scheduler.h',
- 'compiler/instruction.cc',
- 'compiler/instruction.h',
- 'compiler/int64-lowering.cc',
- 'compiler/int64-lowering.h',
- 'compiler/js-builtin-reducer.cc',
- 'compiler/js-builtin-reducer.h',
- 'compiler/js-call-reducer.cc',
- 'compiler/js-call-reducer.h',
- 'compiler/js-context-specialization.cc',
- 'compiler/js-context-specialization.h',
- 'compiler/js-create-lowering.cc',
- 'compiler/js-create-lowering.h',
- 'compiler/js-generic-lowering.cc',
- 'compiler/js-generic-lowering.h',
- 'compiler/js-graph.cc',
- 'compiler/js-graph.h',
- 'compiler/js-inlining.cc',
- 'compiler/js-inlining.h',
- 'compiler/js-inlining-heuristic.cc',
- 'compiler/js-inlining-heuristic.h',
- 'compiler/js-intrinsic-lowering.cc',
- 'compiler/js-intrinsic-lowering.h',
- 'compiler/js-native-context-specialization.cc',
- 'compiler/js-native-context-specialization.h',
- 'compiler/js-operator.cc',
- 'compiler/js-operator.h',
- 'compiler/js-type-hint-lowering.cc',
- 'compiler/js-type-hint-lowering.h',
- 'compiler/js-typed-lowering.cc',
- 'compiler/js-typed-lowering.h',
- 'compiler/jump-threading.cc',
- 'compiler/jump-threading.h',
- 'compiler/linkage.cc',
- 'compiler/linkage.h',
- 'compiler/live-range-separator.cc',
- 'compiler/live-range-separator.h',
- 'compiler/load-elimination.cc',
- 'compiler/load-elimination.h',
- 'compiler/loop-analysis.cc',
- 'compiler/loop-analysis.h',
- 'compiler/loop-peeling.cc',
- 'compiler/loop-peeling.h',
- 'compiler/loop-variable-optimizer.cc',
- 'compiler/loop-variable-optimizer.h',
- 'compiler/machine-operator-reducer.cc',
- 'compiler/machine-operator-reducer.h',
- 'compiler/machine-operator.cc',
- 'compiler/machine-operator.h',
- 'compiler/machine-graph-verifier.cc',
- 'compiler/machine-graph-verifier.h',
- 'compiler/memory-optimizer.cc',
- 'compiler/memory-optimizer.h',
- 'compiler/move-optimizer.cc',
- 'compiler/move-optimizer.h',
- 'compiler/node-aux-data.h',
- 'compiler/node-cache.cc',
- 'compiler/node-cache.h',
- 'compiler/node-marker.cc',
- 'compiler/node-marker.h',
- 'compiler/node-matchers.cc',
- 'compiler/node-matchers.h',
- 'compiler/node-properties.cc',
- 'compiler/node-properties.h',
- 'compiler/node.cc',
- 'compiler/node.h',
- 'compiler/opcodes.cc',
- 'compiler/opcodes.h',
- 'compiler/operation-typer.cc',
- 'compiler/operation-typer.h',
- 'compiler/operator-properties.cc',
- 'compiler/operator-properties.h',
- 'compiler/operator.cc',
- 'compiler/operator.h',
- 'compiler/osr.cc',
- 'compiler/osr.h',
- 'compiler/persistent-map.h',
- 'compiler/pipeline.cc',
- 'compiler/pipeline.h',
- 'compiler/pipeline-statistics.cc',
- 'compiler/pipeline-statistics.h',
- 'compiler/property-access-builder.cc',
- 'compiler/property-access-builder.h',
- 'compiler/raw-machine-assembler.cc',
- 'compiler/raw-machine-assembler.h',
- 'compiler/redundancy-elimination.cc',
- 'compiler/redundancy-elimination.h',
- 'compiler/register-allocator.cc',
- 'compiler/register-allocator.h',
- 'compiler/register-allocator-verifier.cc',
- 'compiler/register-allocator-verifier.h',
- 'compiler/representation-change.cc',
- 'compiler/representation-change.h',
- 'compiler/schedule.cc',
- 'compiler/schedule.h',
- 'compiler/scheduler.cc',
- 'compiler/scheduler.h',
- 'compiler/select-lowering.cc',
- 'compiler/select-lowering.h',
- 'compiler/simd-scalar-lowering.cc',
- 'compiler/simd-scalar-lowering.h',
- 'compiler/simplified-lowering.cc',
- 'compiler/simplified-lowering.h',
- 'compiler/simplified-operator-reducer.cc',
- 'compiler/simplified-operator-reducer.h',
- 'compiler/simplified-operator.cc',
- 'compiler/simplified-operator.h',
- 'compiler/compiler-source-position-table.cc',
- 'compiler/compiler-source-position-table.h',
- 'compiler/state-values-utils.cc',
- 'compiler/state-values-utils.h',
- 'compiler/store-store-elimination.cc',
- 'compiler/store-store-elimination.h',
- 'compiler/types.cc',
- 'compiler/types.h',
- 'compiler/type-cache.cc',
- 'compiler/type-cache.h',
- 'compiler/typed-optimization.cc',
- 'compiler/typed-optimization.h',
- 'compiler/typer.cc',
- 'compiler/typer.h',
- 'compiler/unwinding-info-writer.h',
- 'compiler/value-numbering-reducer.cc',
- 'compiler/value-numbering-reducer.h',
- 'compiler/verifier.cc',
- 'compiler/verifier.h',
- 'compiler/wasm-compiler.cc',
- 'compiler/wasm-compiler.h',
- 'compiler/wasm-linkage.cc',
- 'compiler/zone-stats.cc',
- 'compiler/zone-stats.h',
- 'compiler-dispatcher/compiler-dispatcher.cc',
- 'compiler-dispatcher/compiler-dispatcher.h',
- 'compiler-dispatcher/compiler-dispatcher-job.cc',
- 'compiler-dispatcher/compiler-dispatcher-job.h',
- 'compiler-dispatcher/compiler-dispatcher-tracer.cc',
- 'compiler-dispatcher/compiler-dispatcher-tracer.h',
- 'compiler-dispatcher/optimizing-compile-dispatcher.cc',
- 'compiler-dispatcher/optimizing-compile-dispatcher.h',
- 'compiler-dispatcher/unoptimized-compile-job.cc',
- 'compiler-dispatcher/unoptimized-compile-job.h',
- 'compiler.cc',
- 'compiler.h',
- 'contexts-inl.h',
- 'contexts.cc',
- 'contexts.h',
- 'conversions-inl.h',
- 'conversions.cc',
- 'conversions.h',
- 'counters-inl.h',
- 'counters.cc',
- 'counters.h',
- 'date.cc',
- 'date.h',
- 'dateparser-inl.h',
- 'dateparser.cc',
- 'dateparser.h',
- 'debug/debug-coverage.cc',
- 'debug/debug-coverage.h',
- 'debug/debug-evaluate.cc',
- 'debug/debug-evaluate.h',
- 'debug/debug-interface.h',
- 'debug/debug-frames.cc',
- 'debug/debug-frames.h',
- 'debug/debug-scope-iterator.cc',
- 'debug/debug-scope-iterator.h',
- 'debug/debug-scopes.cc',
- 'debug/debug-scopes.h',
- 'debug/debug-stack-trace-iterator.cc',
- 'debug/debug-stack-trace-iterator.h',
- 'debug/debug-type-profile.cc',
- 'debug/debug-type-profile.h',
- 'debug/debug.cc',
- 'debug/debug.h',
- 'debug/interface-types.h',
- 'debug/liveedit.cc',
- 'debug/liveedit.h',
- 'deoptimize-reason.cc',
- 'deoptimize-reason.h',
- 'deoptimizer.cc',
- 'deoptimizer.h',
- 'detachable-vector.h',
- 'disasm.h',
- 'disassembler.cc',
- 'disassembler.h',
- 'diy-fp.cc',
- 'diy-fp.h',
- 'double.h',
- 'dtoa.cc',
- 'dtoa.h',
- 'eh-frame.cc',
- 'eh-frame.h',
- 'elements-kind.cc',
- 'elements-kind.h',
- 'elements.cc',
- 'elements.h',
- 'execution.cc',
- 'execution.h',
- 'extensions/externalize-string-extension.cc',
- 'extensions/externalize-string-extension.h',
- 'extensions/free-buffer-extension.cc',
- 'extensions/free-buffer-extension.h',
- 'extensions/gc-extension.cc',
- 'extensions/gc-extension.h',
- 'extensions/ignition-statistics-extension.cc',
- 'extensions/ignition-statistics-extension.h',
- 'extensions/statistics-extension.cc',
- 'extensions/statistics-extension.h',
- 'extensions/trigger-failure-extension.cc',
- 'extensions/trigger-failure-extension.h',
- 'external-reference-table.cc',
- 'external-reference-table.h',
- 'factory-inl.h',
- 'factory.cc',
- 'factory.h',
- 'fast-dtoa.cc',
- 'fast-dtoa.h',
- 'feedback-vector-inl.h',
- 'feedback-vector.cc',
- 'feedback-vector.h',
- 'field-index.h',
- 'field-index-inl.h',
- 'field-type.cc',
- 'field-type.h',
- 'fixed-dtoa.cc',
- 'fixed-dtoa.h',
- 'flag-definitions.h',
- 'flags.cc',
- 'flags.h',
- 'frame-constants.h',
- 'frames-inl.h',
- 'frames.cc',
- 'frames.h',
- 'futex-emulation.cc',
- 'futex-emulation.h',
- 'gdb-jit.cc',
- 'gdb-jit.h',
- 'global-handles.cc',
- 'global-handles.h',
- 'globals.h',
- 'handles-inl.h',
- 'handles.cc',
- 'handles.h',
- 'heap-symbols.h',
- 'heap/array-buffer-collector.cc',
- 'heap/array-buffer-collector.h',
- 'heap/array-buffer-tracker-inl.h',
- 'heap/array-buffer-tracker.cc',
- 'heap/array-buffer-tracker.h',
- 'heap/barrier.h',
- 'heap/code-stats.cc',
- 'heap/code-stats.h',
- 'heap/concurrent-marking.cc',
- 'heap/concurrent-marking.h',
- 'heap/embedder-tracing.cc',
- 'heap/embedder-tracing.h',
- 'heap/memory-reducer.cc',
- 'heap/memory-reducer.h',
- 'heap/gc-idle-time-handler.cc',
- 'heap/gc-idle-time-handler.h',
- 'heap/gc-tracer.cc',
- 'heap/gc-tracer.h',
- 'heap/heap-inl.h',
- 'heap/heap.cc',
- 'heap/heap.h',
- 'heap/incremental-marking-inl.h',
- 'heap/incremental-marking-job.cc',
- 'heap/incremental-marking-job.h',
- 'heap/incremental-marking.cc',
- 'heap/incremental-marking.h',
- 'heap/invalidated-slots-inl.h',
- 'heap/invalidated-slots.cc',
- 'heap/invalidated-slots.h',
- 'heap/item-parallel-job.h',
- 'heap/local-allocator.h',
- 'heap/mark-compact-inl.h',
- 'heap/mark-compact.cc',
- 'heap/mark-compact.h',
- 'heap/marking.cc',
- 'heap/marking.h',
- 'heap/object-stats.cc',
- 'heap/object-stats.h',
- 'heap/objects-visiting-inl.h',
- 'heap/objects-visiting.cc',
- 'heap/objects-visiting.h',
- 'heap/remembered-set.h',
- 'heap/scavenge-job.h',
- 'heap/scavenge-job.cc',
- 'heap/scavenger-inl.h',
- 'heap/scavenger.cc',
- 'heap/scavenger.h',
- 'heap/slot-set.h',
- 'heap/spaces-inl.h',
- 'heap/spaces.cc',
- 'heap/spaces.h',
- 'heap/store-buffer.cc',
- 'heap/store-buffer.h',
- 'heap/stress-marking-observer.cc',
- 'heap/stress-marking-observer.h',
- 'heap/stress-scavenge-observer.cc',
- 'heap/stress-scavenge-observer.h',
- 'heap/sweeper.cc',
- 'heap/sweeper.h',
- 'heap/worklist.h',
- 'intl.cc',
- 'intl.h',
- 'icu_util.cc',
- 'icu_util.h',
- 'ic/call-optimization.cc',
- 'ic/call-optimization.h',
- 'ic/handler-configuration.cc',
- 'ic/handler-configuration-inl.h',
- 'ic/handler-configuration.h',
- 'ic/ic-inl.h',
- 'ic/ic-stats.cc',
- 'ic/ic-stats.h',
- 'ic/ic.cc',
- 'ic/ic.h',
- 'identity-map.cc',
- 'identity-map.h',
- 'interface-descriptors.cc',
- 'interface-descriptors.h',
- 'interpreter/block-coverage-builder.h',
- 'interpreter/bytecodes.cc',
- 'interpreter/bytecodes.h',
- 'interpreter/bytecode-array-accessor.cc',
- 'interpreter/bytecode-array-accessor.h',
- 'interpreter/bytecode-array-builder.cc',
- 'interpreter/bytecode-array-builder.h',
- 'interpreter/bytecode-array-iterator.cc',
- 'interpreter/bytecode-array-iterator.h',
- 'interpreter/bytecode-array-random-iterator.cc',
- 'interpreter/bytecode-array-random-iterator.h',
- 'interpreter/bytecode-array-writer.cc',
- 'interpreter/bytecode-array-writer.h',
- 'interpreter/bytecode-decoder.cc',
- 'interpreter/bytecode-decoder.h',
- 'interpreter/bytecode-flags.cc',
- 'interpreter/bytecode-flags.h',
- 'interpreter/bytecode-generator.cc',
- 'interpreter/bytecode-generator.h',
- 'interpreter/bytecode-label.cc',
- 'interpreter/bytecode-label.h',
- 'interpreter/bytecode-node.cc',
- 'interpreter/bytecode-node.h',
- 'interpreter/bytecode-operands.cc',
- 'interpreter/bytecode-operands.h',
- 'interpreter/bytecode-register.cc',
- 'interpreter/bytecode-register.h',
- 'interpreter/bytecode-register-allocator.h',
- 'interpreter/bytecode-register-optimizer.cc',
- 'interpreter/bytecode-register-optimizer.h',
- 'interpreter/bytecode-source-info.cc',
- 'interpreter/bytecode-source-info.h',
- 'interpreter/bytecode-jump-table.h',
- 'interpreter/bytecode-traits.h',
- 'interpreter/constant-array-builder.cc',
- 'interpreter/constant-array-builder.h',
- 'interpreter/control-flow-builders.cc',
- 'interpreter/control-flow-builders.h',
- 'interpreter/handler-table-builder.cc',
- 'interpreter/handler-table-builder.h',
- 'interpreter/interpreter.cc',
- 'interpreter/interpreter.h',
- 'interpreter/interpreter-generator.h',
- 'interpreter/interpreter-intrinsics.cc',
- 'interpreter/interpreter-intrinsics.h',
- 'isolate-inl.h',
- 'isolate.cc',
- 'isolate.h',
- 'json-parser.cc',
- 'json-parser.h',
- 'json-stringifier.cc',
- 'json-stringifier.h',
- 'keys.h',
- 'keys.cc',
- 'label.h',
- 'layout-descriptor-inl.h',
- 'layout-descriptor.cc',
- 'layout-descriptor.h',
- 'locked-queue-inl.h',
- 'locked-queue.h',
- 'log-inl.h',
- 'log-utils.cc',
- 'log-utils.h',
- 'log.cc',
- 'log.h',
- 'lookup-cache-inl.h',
- 'lookup-cache.cc',
- 'lookup-cache.h',
- 'lookup.cc',
- 'lookup.h',
- 'map-updater.cc',
- 'map-updater.h',
- 'macro-assembler-inl.h',
- 'macro-assembler.h',
- 'machine-type.cc',
- 'machine-type.h',
- 'managed.h',
- 'messages.cc',
- 'messages.h',
- 'msan.h',
- 'objects-body-descriptors-inl.h',
- 'objects-body-descriptors.h',
- 'objects-debug.cc',
- 'objects-inl.h',
- 'objects-printer.cc',
- 'objects.cc',
- 'objects.h',
- 'objects/arguments-inl.h',
- 'objects/arguments.h',
- 'objects/bigint.cc',
- 'objects/bigint.h',
- 'objects/code-inl.h',
- 'objects/code.h',
- 'objects/compilation-cache.h',
- 'objects/compilation-cache-inl.h',
- 'objects/data-handler.h',
- 'objects/data-handler-inl.h',
- 'objects/debug-objects-inl.h',
- 'objects/debug-objects.cc',
- 'objects/debug-objects.h',
- 'objects/descriptor-array.h',
- 'objects/dictionary.h',
- 'objects/fixed-array.h',
- 'objects/fixed-array-inl.h',
- 'objects/frame-array.h',
- 'objects/frame-array-inl.h',
- 'objects/hash-table-inl.h',
- 'objects/hash-table.h',
- 'objects/intl-objects.cc',
- 'objects/intl-objects.h',
- 'objects/js-array.h',
- 'objects/js-array-inl.h',
- 'objects/js-collection.h',
- 'objects/js-collection-inl.h',
- 'objects/js-regexp.h',
- 'objects/js-regexp-inl.h',
- 'objects/literal-objects.cc',
- 'objects/literal-objects-inl.h',
- 'objects/literal-objects.h',
- 'objects/map-inl.h',
- 'objects/map.h',
- 'objects/name-inl.h',
- 'objects/name.h',
- 'objects/module-inl.h',
- 'objects/module.cc',
- 'objects/module.h',
- 'objects/object-macros.h',
- 'objects/object-macros-undef.h',
- 'objects/property-descriptor-object.h',
- 'objects/property-descriptor-object-inl.h',
- 'objects/regexp-match-info.h',
- 'objects/scope-info.cc',
- 'objects/scope-info.h',
- 'objects/script.h',
- 'objects/script-inl.h',
- 'objects/shared-function-info-inl.h',
- 'objects/shared-function-info.h',
- 'objects/string-inl.h',
- 'objects/string.h',
- 'objects/string-table.h',
- 'objects/template-objects.cc',
- 'objects/template-objects.h',
- 'ostreams.cc',
- 'ostreams.h',
- 'parsing/background-parsing-task.cc',
- 'parsing/background-parsing-task.h',
- 'parsing/duplicate-finder.h',
- 'parsing/expression-classifier.h',
- 'parsing/expression-scope-reparenter.cc',
- 'parsing/expression-scope-reparenter.h',
- 'parsing/func-name-inferrer.cc',
- 'parsing/func-name-inferrer.h',
- 'parsing/parse-info.cc',
- 'parsing/parse-info.h',
- 'parsing/parser-base.h',
- 'parsing/parser.cc',
- 'parsing/parser.h',
- 'parsing/parsing.cc',
- 'parsing/parsing.h',
- 'parsing/pattern-rewriter.cc',
- 'parsing/preparse-data-format.h',
- 'parsing/preparse-data.cc',
- 'parsing/preparse-data.h',
- 'parsing/preparsed-scope-data.cc',
- 'parsing/preparsed-scope-data.h',
- 'parsing/preparser.cc',
- 'parsing/preparser.h',
- 'parsing/rewriter.cc',
- 'parsing/rewriter.h',
- 'parsing/scanner-character-streams.cc',
- 'parsing/scanner-character-streams.h',
- 'parsing/scanner.cc',
- 'parsing/scanner.h',
- 'parsing/token.cc',
- 'parsing/token.h',
- 'pending-compilation-error-handler.cc',
- 'pending-compilation-error-handler.h',
- 'perf-jit.cc',
- 'perf-jit.h',
- 'profiler/allocation-tracker.cc',
- 'profiler/allocation-tracker.h',
- 'profiler/circular-queue-inl.h',
- 'profiler/circular-queue.h',
- 'profiler/cpu-profiler-inl.h',
- 'profiler/cpu-profiler.cc',
- 'profiler/cpu-profiler.h',
- 'profiler/heap-profiler.cc',
- 'profiler/heap-profiler.h',
- 'profiler/heap-snapshot-generator-inl.h',
- 'profiler/heap-snapshot-generator.cc',
- 'profiler/heap-snapshot-generator.h',
- 'profiler/profiler-listener.cc',
- 'profiler/profiler-listener.h',
- 'profiler/profile-generator-inl.h',
- 'profiler/profile-generator.cc',
- 'profiler/profile-generator.h',
- 'profiler/sampling-heap-profiler.cc',
- 'profiler/sampling-heap-profiler.h',
- 'profiler/strings-storage.cc',
- 'profiler/strings-storage.h',
- 'profiler/tick-sample.cc',
- 'profiler/tick-sample.h',
- 'profiler/tracing-cpu-profiler.cc',
- 'profiler/tracing-cpu-profiler.h',
- 'profiler/unbound-queue-inl.h',
- 'profiler/unbound-queue.h',
- 'property-descriptor.cc',
- 'property-descriptor.h',
- 'property-details.h',
- 'property.cc',
- 'property.h',
- 'prototype.h',
- 'regexp/bytecodes-irregexp.h',
- 'regexp/interpreter-irregexp.cc',
- 'regexp/interpreter-irregexp.h',
- 'regexp/jsregexp-inl.h',
- 'regexp/jsregexp.cc',
- 'regexp/jsregexp.h',
- 'regexp/regexp-ast.cc',
- 'regexp/regexp-ast.h',
- 'regexp/regexp-macro-assembler-irregexp-inl.h',
- 'regexp/regexp-macro-assembler-irregexp.cc',
- 'regexp/regexp-macro-assembler-irregexp.h',
- 'regexp/regexp-macro-assembler-tracer.cc',
- 'regexp/regexp-macro-assembler-tracer.h',
- 'regexp/regexp-macro-assembler.cc',
- 'regexp/regexp-macro-assembler.h',
- 'regexp/regexp-parser.cc',
- 'regexp/regexp-parser.h',
- 'regexp/regexp-stack.cc',
- 'regexp/regexp-stack.h',
- 'regexp/regexp-utils.cc',
- 'regexp/regexp-utils.h',
- 'register-configuration.cc',
- 'register-configuration.h',
- 'reglist.h',
- 'runtime-profiler.cc',
- 'runtime-profiler.h',
- 'runtime/runtime-array.cc',
- 'runtime/runtime-atomics.cc',
- 'runtime/runtime-bigint.cc',
- 'runtime/runtime-classes.cc',
- 'runtime/runtime-collections.cc',
- 'runtime/runtime-compiler.cc',
- 'runtime/runtime-date.cc',
- 'runtime/runtime-debug.cc',
- 'runtime/runtime-forin.cc',
- 'runtime/runtime-function.cc',
- 'runtime/runtime-error.cc',
- 'runtime/runtime-futex.cc',
- 'runtime/runtime-generator.cc',
- 'runtime/runtime-intl.cc',
- 'runtime/runtime-internal.cc',
- 'runtime/runtime-interpreter.cc',
- 'runtime/runtime-literals.cc',
- 'runtime/runtime-liveedit.cc',
- 'runtime/runtime-maths.cc',
- 'runtime/runtime-module.cc',
- 'runtime/runtime-numbers.cc',
- 'runtime/runtime-object.cc',
- 'runtime/runtime-operators.cc',
- 'runtime/runtime-promise.cc',
- 'runtime/runtime-proxy.cc',
- 'runtime/runtime-regexp.cc',
- 'runtime/runtime-scopes.cc',
- 'runtime/runtime-strings.cc',
- 'runtime/runtime-symbol.cc',
- 'runtime/runtime-test.cc',
- 'runtime/runtime-typedarray.cc',
- 'runtime/runtime-utils.h',
- 'runtime/runtime-wasm.cc',
- 'runtime/runtime.cc',
- 'runtime/runtime.h',
- 'safepoint-table.cc',
- 'safepoint-table.h',
- 'setup-isolate.h',
- 'signature.h',
- 'simulator-base.cc',
- 'simulator-base.h',
- 'simulator.h',
- 'snapshot/builtin-deserializer-allocator.cc',
- 'snapshot/builtin-deserializer-allocator.h',
- 'snapshot/builtin-deserializer.cc',
- 'snapshot/builtin-deserializer.h',
- 'snapshot/builtin-serializer-allocator.cc',
- 'snapshot/builtin-serializer-allocator.h',
- 'snapshot/builtin-serializer.cc',
- 'snapshot/builtin-serializer.h',
- 'snapshot/builtin-snapshot-utils.cc',
- 'snapshot/builtin-snapshot-utils.h',
- 'snapshot/code-serializer.cc',
- 'snapshot/code-serializer.h',
- 'snapshot/default-deserializer-allocator.cc',
- 'snapshot/default-deserializer-allocator.h',
- 'snapshot/default-serializer-allocator.cc',
- 'snapshot/default-serializer-allocator.h',
- 'snapshot/deserializer.cc',
- 'snapshot/deserializer.h',
- 'snapshot/natives-common.cc',
- 'snapshot/natives.h',
- 'snapshot/object-deserializer.cc',
- 'snapshot/object-deserializer.h',
- 'snapshot/partial-deserializer.cc',
- 'snapshot/partial-deserializer.h',
- 'snapshot/partial-serializer.cc',
- 'snapshot/partial-serializer.h',
- 'snapshot/serializer.cc',
- 'snapshot/serializer-common.cc',
- 'snapshot/serializer-common.h',
- 'snapshot/serializer.h',
- 'snapshot/snapshot-common.cc',
- 'snapshot/snapshot.h',
- 'snapshot/snapshot-source-sink.cc',
- 'snapshot/snapshot-source-sink.h',
- 'snapshot/startup-deserializer.cc',
- 'snapshot/startup-deserializer.h',
- 'snapshot/startup-serializer.cc',
- 'snapshot/startup-serializer.h',
- 'source-position-table.cc',
- 'source-position-table.h',
- 'source-position.cc',
- 'source-position.h',
- 'splay-tree.h',
- 'splay-tree-inl.h',
- 'startup-data-util.cc',
- 'startup-data-util.h',
- 'string-builder.cc',
- 'string-builder.h',
- 'string-case.cc',
- 'string-case.h',
- 'string-hasher-inl.h',
- 'string-hasher.h',
- 'string-search.h',
- 'string-stream.cc',
- 'string-stream.h',
- 'strtod.cc',
- 'strtod.h',
- 'ic/stub-cache.cc',
- 'ic/stub-cache.h',
- 'third_party/utf8-decoder/utf8-decoder.h',
- 'tracing/trace-event.cc',
- 'tracing/trace-event.h',
- 'tracing/traced-value.cc',
- 'tracing/traced-value.h',
- 'tracing/tracing-category-observer.cc',
- 'tracing/tracing-category-observer.h',
- 'transitions-inl.h',
- 'transitions.cc',
- 'transitions.h',
- 'trap-handler/handler-outside.cc',
- 'trap-handler/handler-shared.cc',
- 'trap-handler/trap-handler.h',
- 'trap-handler/trap-handler-internal.h',
- 'type-hints.cc',
- 'type-hints.h',
- 'unicode-inl.h',
- 'unicode.cc',
- 'unicode.h',
- 'unicode-cache-inl.h',
- 'unicode-cache.h',
- 'unicode-decoder.cc',
- 'unicode-decoder.h',
- 'uri.cc',
- 'uri.h',
- 'utils-inl.h',
- 'utils.cc',
- 'utils.h',
- 'v8.cc',
- 'v8.h',
- 'v8memory.h',
- 'v8threads.cc',
- 'v8threads.h',
- 'value-serializer.cc',
- 'value-serializer.h',
- 'vector-slot-pair.cc',
- 'vector-slot-pair.h',
- 'vector.h',
- 'version.cc',
- 'version.h',
- 'visitors.cc',
- 'visitors.h',
- 'vm-state-inl.h',
- 'vm-state.h',
- 'wasm/baseline/liftoff-assembler-defs.h',
- 'wasm/baseline/liftoff-assembler.cc',
- 'wasm/baseline/liftoff-assembler.h',
- 'wasm/baseline/liftoff-compiler.cc',
- 'wasm/baseline/liftoff-register.h',
- 'wasm/compilation-manager.cc',
- 'wasm/compilation-manager.h',
- 'wasm/decoder.h',
- 'wasm/function-body-decoder.cc',
- 'wasm/function-body-decoder.h',
- 'wasm/function-body-decoder-impl.h',
- 'wasm/leb-helper.h',
- 'wasm/local-decl-encoder.cc',
- 'wasm/local-decl-encoder.h',
- 'wasm/memory-tracing.cc',
- 'wasm/memory-tracing.h',
- 'wasm/module-compiler.cc',
- 'wasm/module-compiler.h',
- 'wasm/module-decoder.cc',
- 'wasm/module-decoder.h',
- 'wasm/signature-map.cc',
- 'wasm/signature-map.h',
- 'wasm/streaming-decoder.cc',
- 'wasm/streaming-decoder.h',
- 'wasm/wasm-api.cc',
- 'wasm/wasm-api.h',
- 'wasm/wasm-code-manager.cc',
- 'wasm/wasm-code-manager.h',
- 'wasm/wasm-code-specialization.cc',
- 'wasm/wasm-code-specialization.h',
- 'wasm/wasm-code-wrapper.cc',
- 'wasm/wasm-code-wrapper.h',
- 'wasm/wasm-constants.h',
- 'wasm/wasm-debug.cc',
- 'wasm/wasm-engine.cc',
- 'wasm/wasm-engine.h',
- 'wasm/wasm-external-refs.cc',
- 'wasm/wasm-external-refs.h',
- 'wasm/wasm-js.cc',
- 'wasm/wasm-js.h',
- 'wasm/wasm-limits.h',
- 'wasm/wasm-memory.cc',
- 'wasm/wasm-memory.h',
- 'wasm/wasm-module.cc',
- 'wasm/wasm-module.h',
- 'wasm/wasm-module-builder.cc',
- 'wasm/wasm-module-builder.h',
- 'wasm/wasm-interpreter.cc',
- 'wasm/wasm-interpreter.h',
- 'wasm/wasm-objects-inl.h',
- 'wasm/wasm-objects.cc',
- 'wasm/wasm-objects.h',
- 'wasm/wasm-opcodes.cc',
- 'wasm/wasm-opcodes.h',
- 'wasm/wasm-result.cc',
- 'wasm/wasm-result.h',
- 'wasm/wasm-serialization.cc',
- 'wasm/wasm-serialization.h',
- 'wasm/wasm-text.cc',
- 'wasm/wasm-text.h',
- 'wasm/wasm-value.h',
- 'zone/accounting-allocator.cc',
- 'zone/accounting-allocator.h',
- 'zone/zone-segment.cc',
- 'zone/zone-segment.h',
- 'zone/zone.cc',
- 'zone/zone.h',
- 'zone/zone-chunk-list.h',
- 'zone/zone-segment.cc',
- 'zone/zone-segment.h',
- 'zone/zone-allocator.h',
- 'zone/zone-containers.h',
- 'zone/zone-handle-set.h',
- 'zone/zone-list-inl.h',
- ],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }],
- ['v8_target_arch=="arm"', {
- 'sources': [ ### gcmole(arch:arm) ###
- 'arm/assembler-arm-inl.h',
- 'arm/assembler-arm.cc',
- 'arm/assembler-arm.h',
- 'arm/code-stubs-arm.cc',
- 'arm/code-stubs-arm.h',
- 'arm/codegen-arm.cc',
- 'arm/constants-arm.h',
- 'arm/constants-arm.cc',
- 'arm/cpu-arm.cc',
- 'arm/deoptimizer-arm.cc',
- 'arm/disasm-arm.cc',
- 'arm/frame-constants-arm.cc',
- 'arm/frame-constants-arm.h',
- 'arm/interface-descriptors-arm.cc',
- 'arm/interface-descriptors-arm.h',
- 'arm/macro-assembler-arm.cc',
- 'arm/macro-assembler-arm.h',
- 'arm/simulator-arm.cc',
- 'arm/simulator-arm.h',
- 'arm/eh-frame-arm.cc',
- 'compiler/arm/code-generator-arm.cc',
- 'compiler/arm/instruction-codes-arm.h',
- 'compiler/arm/instruction-scheduler-arm.cc',
- 'compiler/arm/instruction-selector-arm.cc',
- 'compiler/arm/unwinding-info-writer-arm.cc',
- 'compiler/arm/unwinding-info-writer-arm.h',
- 'debug/arm/debug-arm.cc',
- 'regexp/arm/regexp-macro-assembler-arm.cc',
- 'regexp/arm/regexp-macro-assembler-arm.h',
- 'wasm/baseline/arm/liftoff-assembler-arm-defs.h',
- 'wasm/baseline/arm/liftoff-assembler-arm.h',
- ],
- }],
- ['v8_target_arch=="arm64"', {
- 'sources': [ ### gcmole(arch:arm64) ###
- 'arm64/assembler-arm64.cc',
- 'arm64/assembler-arm64.h',
- 'arm64/assembler-arm64-inl.h',
- 'arm64/codegen-arm64.cc',
- 'arm64/code-stubs-arm64.cc',
- 'arm64/code-stubs-arm64.h',
- 'arm64/constants-arm64.h',
- 'arm64/cpu-arm64.cc',
- 'arm64/decoder-arm64.cc',
- 'arm64/decoder-arm64.h',
- 'arm64/decoder-arm64-inl.h',
- 'arm64/deoptimizer-arm64.cc',
- 'arm64/disasm-arm64.cc',
- 'arm64/disasm-arm64.h',
- 'arm64/frame-constants-arm64.cc',
- 'arm64/frame-constants-arm64.h',
- 'arm64/instructions-arm64-constants.cc',
- 'arm64/instructions-arm64.cc',
- 'arm64/instructions-arm64.h',
- 'arm64/instrument-arm64.cc',
- 'arm64/instrument-arm64.h',
- 'arm64/interface-descriptors-arm64.cc',
- 'arm64/interface-descriptors-arm64.h',
- 'arm64/macro-assembler-arm64.cc',
- 'arm64/macro-assembler-arm64.h',
- 'arm64/macro-assembler-arm64-inl.h',
- 'arm64/simulator-arm64.cc',
- 'arm64/simulator-arm64.h',
- 'arm64/simulator-logic-arm64.cc',
- 'arm64/utils-arm64.cc',
- 'arm64/utils-arm64.h',
- 'arm64/eh-frame-arm64.cc',
- 'compiler/arm64/code-generator-arm64.cc',
- 'compiler/arm64/instruction-codes-arm64.h',
- 'compiler/arm64/instruction-scheduler-arm64.cc',
- 'compiler/arm64/instruction-selector-arm64.cc',
- 'compiler/arm64/unwinding-info-writer-arm64.cc',
- 'compiler/arm64/unwinding-info-writer-arm64.h',
- 'debug/arm64/debug-arm64.cc',
- 'regexp/arm64/regexp-macro-assembler-arm64.cc',
- 'regexp/arm64/regexp-macro-assembler-arm64.h',
- 'wasm/baseline/arm64/liftoff-assembler-arm64-defs.h',
- 'wasm/baseline/arm64/liftoff-assembler-arm64.h',
- ],
- }],
- ['v8_target_arch=="ia32"', {
- 'sources': [ ### gcmole(arch:ia32) ###
- 'ia32/assembler-ia32-inl.h',
- 'ia32/assembler-ia32.cc',
- 'ia32/assembler-ia32.h',
- 'ia32/code-stubs-ia32.cc',
- 'ia32/codegen-ia32.cc',
- 'ia32/cpu-ia32.cc',
- 'ia32/deoptimizer-ia32.cc',
- 'ia32/disasm-ia32.cc',
- 'ia32/frame-constants-ia32.cc',
- 'ia32/frame-constants-ia32.h',
- 'ia32/interface-descriptors-ia32.cc',
- 'ia32/macro-assembler-ia32.cc',
- 'ia32/macro-assembler-ia32.h',
- 'ia32/simulator-ia32.cc',
- 'ia32/simulator-ia32.h',
- 'ia32/sse-instr.h',
- 'compiler/ia32/code-generator-ia32.cc',
- 'compiler/ia32/instruction-codes-ia32.h',
- 'compiler/ia32/instruction-scheduler-ia32.cc',
- 'compiler/ia32/instruction-selector-ia32.cc',
- 'debug/ia32/debug-ia32.cc',
- 'regexp/ia32/regexp-macro-assembler-ia32.cc',
- 'regexp/ia32/regexp-macro-assembler-ia32.h',
- 'wasm/baseline/ia32/liftoff-assembler-ia32-defs.h',
- 'wasm/baseline/ia32/liftoff-assembler-ia32.h',
- ],
- }],
- ['v8_target_arch=="mips" or v8_target_arch=="mipsel"', {
- 'sources': [ ### gcmole(arch:mipsel) ###
- 'mips/assembler-mips.cc',
- 'mips/assembler-mips.h',
- 'mips/assembler-mips-inl.h',
- 'mips/codegen-mips.cc',
- 'mips/code-stubs-mips.cc',
- 'mips/code-stubs-mips.h',
- 'mips/constants-mips.cc',
- 'mips/constants-mips.h',
- 'mips/cpu-mips.cc',
- 'mips/deoptimizer-mips.cc',
- 'mips/disasm-mips.cc',
- 'mips/frame-constants-mips.cc',
- 'mips/frame-constants-mips.h',
- 'mips/interface-descriptors-mips.cc',
- 'mips/macro-assembler-mips.cc',
- 'mips/macro-assembler-mips.h',
- 'mips/simulator-mips.cc',
- 'mips/simulator-mips.h',
- 'compiler/mips/code-generator-mips.cc',
- 'compiler/mips/instruction-codes-mips.h',
- 'compiler/mips/instruction-scheduler-mips.cc',
- 'compiler/mips/instruction-selector-mips.cc',
- 'debug/mips/debug-mips.cc',
- 'regexp/mips/regexp-macro-assembler-mips.cc',
- 'regexp/mips/regexp-macro-assembler-mips.h',
- 'wasm/baseline/mips/liftoff-assembler-mips-defs.h',
- 'wasm/baseline/mips/liftoff-assembler-mips.h',
- ],
- }],
- ['v8_target_arch=="mips64" or v8_target_arch=="mips64el"', {
- 'sources': [ ### gcmole(arch:mips64el) ###
- 'mips64/assembler-mips64.cc',
- 'mips64/assembler-mips64.h',
- 'mips64/assembler-mips64-inl.h',
- 'mips64/codegen-mips64.cc',
- 'mips64/code-stubs-mips64.cc',
- 'mips64/code-stubs-mips64.h',
- 'mips64/constants-mips64.cc',
- 'mips64/constants-mips64.h',
- 'mips64/cpu-mips64.cc',
- 'mips64/deoptimizer-mips64.cc',
- 'mips64/disasm-mips64.cc',
- 'mips64/frame-constants-mips64.cc',
- 'mips64/frame-constants-mips64.h',
- 'mips64/interface-descriptors-mips64.cc',
- 'mips64/macro-assembler-mips64.cc',
- 'mips64/macro-assembler-mips64.h',
- 'mips64/simulator-mips64.cc',
- 'mips64/simulator-mips64.h',
- 'compiler/mips64/code-generator-mips64.cc',
- 'compiler/mips64/instruction-codes-mips64.h',
- 'compiler/mips64/instruction-scheduler-mips64.cc',
- 'compiler/mips64/instruction-selector-mips64.cc',
- 'debug/mips64/debug-mips64.cc',
- 'regexp/mips64/regexp-macro-assembler-mips64.cc',
- 'regexp/mips64/regexp-macro-assembler-mips64.h',
- 'wasm/baseline/mips64/liftoff-assembler-mips64-defs.h',
- 'wasm/baseline/mips64/liftoff-assembler-mips64.h',
- ],
- }],
- ['v8_target_arch=="x64"', {
- 'sources': [ ### gcmole(arch:x64) ###
- 'compiler/x64/code-generator-x64.cc',
- 'compiler/x64/instruction-codes-x64.h',
- 'compiler/x64/instruction-scheduler-x64.cc',
- 'compiler/x64/instruction-selector-x64.cc',
- 'compiler/x64/unwinding-info-writer-x64.cc',
- 'compiler/x64/unwinding-info-writer-x64.h',
- 'x64/assembler-x64-inl.h',
- 'x64/assembler-x64.cc',
- 'x64/assembler-x64.h',
- 'x64/code-stubs-x64.cc',
- 'x64/codegen-x64.cc',
- 'x64/cpu-x64.cc',
- 'x64/deoptimizer-x64.cc',
- 'x64/disasm-x64.cc',
- 'x64/eh-frame-x64.cc',
- 'x64/frame-constants-x64.cc',
- 'x64/frame-constants-x64.h',
- 'x64/interface-descriptors-x64.cc',
- 'x64/macro-assembler-x64.cc',
- 'x64/macro-assembler-x64.h',
- 'x64/simulator-x64.cc',
- 'x64/simulator-x64.h',
- 'x64/sse-instr.h',
- 'debug/x64/debug-x64.cc',
- 'regexp/x64/regexp-macro-assembler-x64.cc',
- 'regexp/x64/regexp-macro-assembler-x64.h',
- 'third_party/valgrind/valgrind.h',
- 'wasm/baseline/x64/liftoff-assembler-x64-defs.h',
- 'wasm/baseline/x64/liftoff-assembler-x64.h',
- ],
- }],
- ['v8_target_arch=="x64" and OS=="linux"', {
- 'sources': ['trap-handler/handler-inside.cc']
- }],
- ['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
- 'sources': [ ### gcmole(arch:ppc) ###
- 'compiler/ppc/code-generator-ppc.cc',
- 'compiler/ppc/instruction-codes-ppc.h',
- 'compiler/ppc/instruction-scheduler-ppc.cc',
- 'compiler/ppc/instruction-selector-ppc.cc',
- 'debug/ppc/debug-ppc.cc',
- 'ppc/assembler-ppc-inl.h',
- 'ppc/assembler-ppc.cc',
- 'ppc/assembler-ppc.h',
- 'ppc/code-stubs-ppc.cc',
- 'ppc/code-stubs-ppc.h',
- 'ppc/codegen-ppc.cc',
- 'ppc/constants-ppc.h',
- 'ppc/constants-ppc.cc',
- 'ppc/cpu-ppc.cc',
- 'ppc/deoptimizer-ppc.cc',
- 'ppc/disasm-ppc.cc',
- 'ppc/frame-constants-ppc.cc',
- 'ppc/frame-constants-ppc.h',
- 'ppc/interface-descriptors-ppc.cc',
- 'ppc/macro-assembler-ppc.cc',
- 'ppc/macro-assembler-ppc.h',
- 'ppc/simulator-ppc.cc',
- 'ppc/simulator-ppc.h',
- 'regexp/ppc/regexp-macro-assembler-ppc.cc',
- 'regexp/ppc/regexp-macro-assembler-ppc.h',
- 'wasm/baseline/ppc/liftoff-assembler-ppc-defs.h',
- 'wasm/baseline/ppc/liftoff-assembler-ppc.h',
- ],
- }],
- ['v8_target_arch=="s390" or v8_target_arch=="s390x"', {
- 'sources': [ ### gcmole(arch:s390) ###
- 'compiler/s390/code-generator-s390.cc',
- 'compiler/s390/instruction-codes-s390.h',
- 'compiler/s390/instruction-scheduler-s390.cc',
- 'compiler/s390/instruction-selector-s390.cc',
- 'debug/s390/debug-s390.cc',
- 'regexp/s390/regexp-macro-assembler-s390.cc',
- 'regexp/s390/regexp-macro-assembler-s390.h',
- 's390/assembler-s390.cc',
- 's390/assembler-s390.h',
- 's390/assembler-s390-inl.h',
- 's390/codegen-s390.cc',
- 's390/code-stubs-s390.cc',
- 's390/code-stubs-s390.h',
- 's390/constants-s390.cc',
- 's390/constants-s390.h',
- 's390/cpu-s390.cc',
- 's390/deoptimizer-s390.cc',
- 's390/disasm-s390.cc',
- 's390/frame-constants-s390.cc',
- 's390/frame-constants-s390.h',
- 's390/interface-descriptors-s390.cc',
- 's390/macro-assembler-s390.cc',
- 's390/macro-assembler-s390.h',
- 's390/simulator-s390.cc',
- 's390/simulator-s390.h',
- 'wasm/baseline/s390/liftoff-assembler-s390-defs.h',
- 'wasm/baseline/s390/liftoff-assembler-s390.h',
- ],
- }],
- ['OS=="win"', {
- 'variables': {
- 'gyp_generators': '<!(echo $GYP_GENERATORS)',
- },
- 'msvs_disabled_warnings': [4351, 4355, 4800],
- # When building Official, the .lib is too large and exceeds the 2G
- # limit. This breaks it into multiple pieces to avoid the limit.
- # See http://crbug.com/485155.
- 'msvs_shard': 4,
- # This will prevent V8's .cc files conflicting with the inspector's
- # .cpp files in the same shard.
- 'msvs_settings': {
- 'VCCLCompilerTool': {
- 'ObjectFile':'$(IntDir)%(Extension)\\',
- },
- },
- }],
- ['component=="shared_library"', {
- 'defines': [
- 'BUILDING_V8_SHARED',
- ],
- }],
- ['v8_postmortem_support=="true"', {
- 'sources': [
- '<(SHARED_INTERMEDIATE_DIR)/debug-support.cc',
- ]
- }],
- ['v8_enable_i18n_support==1', {
- 'dependencies': [
- '<(icu_gyp_path):icui18n',
- '<(icu_gyp_path):icuuc',
- ],
- 'conditions': [
- ['icu_use_data_file_flag==1', {
- 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_FILE'],
- }, { # else icu_use_data_file_flag !=1
- 'conditions': [
- ['OS=="win"', {
- 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_SHARED'],
- }, {
- 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC'],
- }],
- ],
- }],
- ],
- }, { # v8_enable_i18n_support==0
- 'sources!': [
- 'builtins/builtins-intl.cc',
- 'builtins/builtins-intl.h',
- 'char-predicates.cc',
- 'intl.cc',
- 'intl.h',
- 'objects/intl-objects.cc',
- 'objects/intl-objects.h',
- 'runtime/runtime-intl.cc',
- ],
- }],
- ['OS=="win" and v8_enable_i18n_support==1', {
- 'dependencies': [
- '<(icu_gyp_path):icudata',
- ],
- }],
- ],
- },
- {
- 'target_name': 'v8_libbase',
- 'type': '<(component)',
- 'variables': {
- 'optimize': 'max',
- },
- 'include_dirs+': [
- '..',
- ],
- 'sources': [
- 'base/adapters.h',
- 'base/atomic-utils.h',
- 'base/atomicops.h',
- 'base/atomicops_internals_atomicword_compat.h',
- 'base/atomicops_internals_portable.h',
- 'base/atomicops_internals_std.h',
- 'base/base-export.h',
- 'base/bits.cc',
- 'base/bits.h',
- 'base/build_config.h',
- 'base/compiler-specific.h',
- 'base/cpu.cc',
- 'base/cpu.h',
- 'base/division-by-constant.cc',
- 'base/division-by-constant.h',
- 'base/debug/stack_trace.cc',
- 'base/debug/stack_trace.h',
- 'base/export-template.h',
- 'base/file-utils.cc',
- 'base/file-utils.h',
- 'base/flags.h',
- 'base/format-macros.h',
- 'base/free_deleter.h',
- 'base/functional.cc',
- 'base/functional.h',
- 'base/hashmap.h',
- 'base/hashmap-entry.h',
- 'base/ieee754.cc',
- 'base/ieee754.h',
- 'base/iterator.h',
- 'base/lazy-instance.h',
- 'base/logging.cc',
- 'base/logging.h',
- 'base/macros.h',
- 'base/once.cc',
- 'base/once.h',
- 'base/optional.h',
- 'base/page-allocator.cc',
- 'base/page-allocator.h',
- 'base/platform/elapsed-timer.h',
- 'base/platform/time.cc',
- 'base/platform/time.h',
- 'base/platform/condition-variable.cc',
- 'base/platform/condition-variable.h',
- 'base/platform/mutex.cc',
- 'base/platform/mutex.h',
- 'base/platform/platform.h',
- 'base/platform/semaphore.cc',
- 'base/platform/semaphore.h',
- 'base/ring-buffer.h',
- 'base/safe_conversions.h',
- 'base/safe_conversions_impl.h',
- 'base/safe_math.h',
- 'base/safe_math_impl.h',
- 'base/sys-info.cc',
- 'base/sys-info.h',
- 'base/template-utils.h',
- 'base/timezone-cache.h',
- 'base/tsan.h',
- 'base/utils/random-number-generator.cc',
- 'base/utils/random-number-generator.h',
- ],
- 'target_conditions': [
- ['OS=="android" and _toolset=="target"', {
- 'libraries': [
- '-llog',
- ],
- 'include_dirs': [
- 'src/common/android/include',
- ],
- }],
- ],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }],
- ['component=="shared_library"', {
- 'defines': [
- 'BUILDING_V8_BASE_SHARED',
- ],
- 'direct_dependent_settings': {
- 'defines': [
- 'USING_V8_BASE_SHARED',
- ],
- },
- }],
- ['OS=="linux"', {
- 'link_settings': {
- 'libraries': [
- '-ldl',
- '-lrt'
- ],
- },
- 'sources': [
- 'base/debug/stack_trace_posix.cc',
- 'base/platform/platform-linux.cc',
- 'base/platform/platform-posix.h',
- 'base/platform/platform-posix.cc',
- 'base/platform/platform-posix-time.h',
- 'base/platform/platform-posix-time.cc',
- ],
- }
- ],
- ['OS=="android"', {
- 'sources': [
- 'base/debug/stack_trace_android.cc',
- 'base/platform/platform-posix.h',
- 'base/platform/platform-posix.cc',
- 'base/platform/platform-posix-time.h',
- 'base/platform/platform-posix-time.cc',
- ],
- 'link_settings': {
- 'target_conditions': [
- ['_toolset=="host" and host_os!="mac"', {
- # Only include libdl and librt on host builds because they
- # are included by default on Android target builds, and we
- # don't want to re-include them here since this will change
- # library order and break (see crbug.com/469973).
- # These libraries do not exist on Mac hosted builds.
- 'libraries': [
- '-ldl',
- '-lrt'
- ]
- }]
- ]
- },
- 'conditions': [
- ['host_os=="mac"', {
- 'target_conditions': [
- ['_toolset=="host"', {
- 'sources': [
- 'base/platform/platform-macos.cc'
- ]
- }, {
- 'sources': [
- 'base/platform/platform-linux.cc'
- ]
- }],
- ],
- }, {
- 'sources': [
- 'base/platform/platform-linux.cc'
- ]
- }],
- ],
- },
- ],
- ['OS=="qnx"', {
- 'link_settings': {
- 'target_conditions': [
- ['_toolset=="host" and host_os=="linux"', {
- 'libraries': [
- '-lrt'
- ],
- }],
- ['_toolset=="target"', {
- 'libraries': [
- '-lbacktrace'
- ],
- }],
- ],
- },
- 'sources': [
- 'base/debug/stack_trace_posix.cc',
- 'base/platform/platform-posix.h',
- 'base/platform/platform-posix.cc',
- 'base/platform/platform-posix-time.h',
- 'base/platform/platform-posix-time.cc',
- 'base/qnx-math.h'
- ],
- 'target_conditions': [
- ['_toolset=="host" and host_os=="linux"', {
- 'sources': [
- 'base/platform/platform-linux.cc'
- ],
- }],
- ['_toolset=="host" and host_os=="mac"', {
- 'sources': [
- 'base/platform/platform-macos.cc'
- ],
- }],
- ['_toolset=="target"', {
- 'sources': [
- 'base/platform/platform-qnx.cc'
- ],
- }],
- ],
- },
- ],
- ['OS=="freebsd"', {
- 'link_settings': {
- 'libraries': [
- '-L/usr/local/lib -lexecinfo',
- ]},
- 'sources': [
- 'base/debug/stack_trace_posix.cc',
- 'base/platform/platform-freebsd.cc',
- 'base/platform/platform-posix.h',
- 'base/platform/platform-posix.cc',
- 'base/platform/platform-posix-time.h',
- 'base/platform/platform-posix-time.cc',
- ],
- }
- ],
- ['OS=="openbsd"', {
- 'link_settings': {
- 'libraries': [
- '-L/usr/local/lib -lexecinfo',
- ]},
- 'sources': [
- 'base/debug/stack_trace_posix.cc',
- 'base/platform/platform-openbsd.cc',
- 'base/platform/platform-posix.h',
- 'base/platform/platform-posix.cc',
- 'base/platform/platform-posix-time.h',
- 'base/platform/platform-posix-time.cc',
- ],
- }
- ],
- ['OS=="netbsd"', {
- 'link_settings': {
- 'libraries': [
- '-L/usr/pkg/lib -Wl,-R/usr/pkg/lib -lexecinfo',
- ]},
- 'sources': [
- 'base/debug/stack_trace_posix.cc',
- 'base/platform/platform-openbsd.cc',
- 'base/platform/platform-posix.h',
- 'base/platform/platform-posix.cc',
- 'base/platform/platform-posix-time.h',
- 'base/platform/platform-posix-time.cc',
- ],
- }
- ],
- ['OS=="aix"', {
- 'sources': [
- 'base/debug/stack_trace_posix.cc',
- 'base/platform/platform-aix.cc',
- 'base/platform/platform-posix.h',
- 'base/platform/platform-posix.cc'
- ]},
- ],
- ['OS=="fuchsia"', {
- 'sources': [
- 'base/debug/stack_trace_fuchsia.cc',
- 'base/platform/platform-fuchsia.cc',
- ]},
- ],
- ['OS=="solaris"', {
- 'link_settings': {
- 'libraries': [
- '-lnsl -lrt',
- ]},
- 'sources': [
- 'base/debug/stack_trace_posix.cc',
- 'base/platform/platform-solaris.cc',
- 'base/platform/platform-posix.h',
- 'base/platform/platform-posix.cc',
- ],
- }
- ],
- ['OS=="mac"', {
- 'sources': [
- 'base/debug/stack_trace_posix.cc',
- 'base/platform/platform-macos.cc',
- 'base/platform/platform-posix.h',
- 'base/platform/platform-posix.cc',
- 'base/platform/platform-posix-time.h',
- 'base/platform/platform-posix-time.cc',
- ]},
- ],
- ['OS=="win"', {
- 'defines': [
- '_CRT_RAND_S' # for rand_s()
- ],
- 'variables': {
- 'gyp_generators': '<!(echo $GYP_GENERATORS)',
- },
- 'conditions': [
- ['gyp_generators=="make"', {
- 'variables': {
- 'build_env': '<!(uname -o)',
- },
- 'conditions': [
- ['build_env=="Cygwin"', {
- 'sources': [
- 'base/debug/stack_trace_posix.cc',
- 'base/platform/platform-cygwin.cc',
- 'base/platform/platform-posix.h',
- 'base/platform/platform-posix.cc',
- ],
- }, {
- 'sources': [
- 'base/debug/stack_trace_win.cc',
- 'base/platform/platform-win32.cc',
- 'base/win32-headers.h',
- ],
- }],
- ],
- 'link_settings': {
- 'libraries': [ '-lwinmm', '-lws2_32' ],
- },
- }, {
- 'sources': [
- 'base/debug/stack_trace_win.cc',
- 'base/platform/platform-win32.cc',
- 'base/win32-headers.h',
- ],
- 'msvs_disabled_warnings': [4351, 4355, 4800],
- 'link_settings': {
- 'libraries': [
- '-ldbghelp.lib',
- '-lshlwapi.lib',
- '-lwinmm.lib',
- '-lws2_32.lib'
- ],
- },
- }],
- ],
- }],
- ],
- },
- {
- 'target_name': 'v8_libplatform',
- 'type': '<(component)',
- 'variables': {
- 'optimize': 'max',
- },
- 'dependencies': [
- 'v8_libbase',
- ],
- 'include_dirs+': [
- '..',
- '<(DEPTH)',
- '../include',
- ],
- 'sources': [
- '../include/libplatform/libplatform.h',
- '../include/libplatform/libplatform-export.h',
- '../include/libplatform/v8-tracing.h',
- 'libplatform/default-background-task-runner.cc',
- 'libplatform/default-background-task-runner.h',
- 'libplatform/default-foreground-task-runner.cc',
- 'libplatform/default-foreground-task-runner.h',
- 'libplatform/default-platform.cc',
- 'libplatform/default-platform.h',
- 'libplatform/task-queue.cc',
- 'libplatform/task-queue.h',
- 'libplatform/tracing/trace-buffer.cc',
- 'libplatform/tracing/trace-buffer.h',
- 'libplatform/tracing/trace-config.cc',
- 'libplatform/tracing/trace-object.cc',
- 'libplatform/tracing/trace-writer.cc',
- 'libplatform/tracing/trace-writer.h',
- 'libplatform/tracing/tracing-controller.cc',
- 'libplatform/worker-thread.cc',
- 'libplatform/worker-thread.h',
- ],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }],
- ['component=="shared_library"', {
- 'direct_dependent_settings': {
- 'defines': [ 'USING_V8_PLATFORM_SHARED' ],
- },
- 'defines': [ 'BUILDING_V8_PLATFORM_SHARED' ],
- }]
- ],
- 'direct_dependent_settings': {
- 'include_dirs': [
- '../include',
- ],
- },
- },
- {
- 'target_name': 'v8_libsampler',
- 'type': 'static_library',
- 'variables': {
- 'optimize': 'max',
- },
- 'dependencies': [
- 'v8_libbase',
- ],
- 'include_dirs+': [
- '..',
- '../include',
- ],
- 'sources': [
- 'libsampler/sampler.cc',
- 'libsampler/sampler.h'
- ],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }],
- ],
- 'direct_dependent_settings': {
- 'include_dirs': [
- '../include',
- ],
- },
- },
- {
- 'target_name': 'natives_blob',
- 'type': 'none',
- 'conditions': [
- [ 'v8_use_external_startup_data==1', {
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'dependencies': ['js2c#host'],
- }, {
- 'dependencies': ['js2c'],
- }],
- ],
- 'actions': [{
- 'action_name': 'concatenate_natives_blob',
- 'inputs': [
- '../tools/concatenate-files.py',
- '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
- '<(SHARED_INTERMEDIATE_DIR)/libraries-extras.bin',
- '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental-extras.bin',
- ],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'target_conditions': [
- ['_toolset=="host"', {
- 'outputs': [
- '<(PRODUCT_DIR)/natives_blob_host.bin',
- ],
- 'action': [
- 'python', '<@(_inputs)', '<(PRODUCT_DIR)/natives_blob_host.bin'
- ],
- }, {
- 'outputs': [
- '<(PRODUCT_DIR)/natives_blob.bin',
- ],
- 'action': [
- 'python', '<@(_inputs)', '<(PRODUCT_DIR)/natives_blob.bin'
- ],
- }],
- ],
- }, {
- 'outputs': [
- '<(PRODUCT_DIR)/natives_blob.bin',
- ],
- 'action': [
- 'python', '<@(_inputs)', '<(PRODUCT_DIR)/natives_blob.bin'
- ],
- }],
- ],
- }],
- }],
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }],
- ]
- },
- {
- 'target_name': 'js2c',
- 'type': 'none',
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host'],
- }, {
- 'toolsets': ['target'],
- }],
- ],
- 'variables': {
- 'library_files': [
- 'js/macros.py',
- 'messages.h',
- 'js/prologue.js',
- 'js/array.js',
- 'js/typedarray.js',
- 'js/messages.js',
- 'js/spread.js',
- 'debug/mirrors.js',
- 'debug/debug.js',
- 'debug/liveedit.js',
- ],
- 'libraries_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
- 'libraries_extras_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-extras.bin',
- 'libraries_experimental_extras_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental-extras.bin',
- 'conditions': [
- ['v8_enable_i18n_support==1', {
- 'library_files': ['js/intl.js'],
- }],
- ],
- },
- 'actions': [
- {
- 'action_name': 'js2c',
- 'inputs': [
- '../tools/js2c.py',
- '<@(library_files)',
- ],
- 'outputs': ['<(SHARED_INTERMEDIATE_DIR)/libraries.cc'],
- 'action': [
- 'python',
- '../tools/js2c.py',
- '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
- 'CORE',
- '<@(library_files)',
- ],
- },
- {
- 'action_name': 'js2c_bin',
- 'inputs': [
- '../tools/js2c.py',
- '<@(library_files)',
- ],
- 'outputs': ['<@(libraries_bin_file)'],
- 'action': [
- 'python',
- '../tools/js2c.py',
- '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
- 'CORE',
- '<@(library_files)',
- '--startup_blob', '<@(libraries_bin_file)',
- '--nojs',
- ],
- },
- {
- 'action_name': 'js2c_extras',
- 'inputs': [
- '../tools/js2c.py',
- '<@(v8_extra_library_files)',
- ],
- 'outputs': ['<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc'],
- 'action': [
- 'python',
- '../tools/js2c.py',
- '<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
- 'EXTRAS',
- '<@(v8_extra_library_files)',
- ],
- },
- {
- 'action_name': 'js2c_extras_bin',
- 'inputs': [
- '../tools/js2c.py',
- '<@(v8_extra_library_files)',
- ],
- 'outputs': ['<@(libraries_extras_bin_file)'],
- 'action': [
- 'python',
- '../tools/js2c.py',
- '<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
- 'EXTRAS',
- '<@(v8_extra_library_files)',
- '--startup_blob', '<@(libraries_extras_bin_file)',
- '--nojs',
- ],
- },
- {
- 'action_name': 'js2c_experimental_extras',
- 'inputs': [
- '../tools/js2c.py',
- '<@(v8_experimental_extra_library_files)',
- ],
- 'outputs': [
- '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
- ],
- 'action': [
- 'python',
- '../tools/js2c.py',
- '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
- 'EXPERIMENTAL_EXTRAS',
- '<@(v8_experimental_extra_library_files)',
- ],
- },
- {
- 'action_name': 'js2c_experimental_extras_bin',
- 'inputs': [
- '../tools/js2c.py',
- '<@(v8_experimental_extra_library_files)',
- ],
- 'outputs': ['<@(libraries_experimental_extras_bin_file)'],
- 'action': [
- 'python',
- '../tools/js2c.py',
- '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
- 'EXPERIMENTAL_EXTRAS',
- '<@(v8_experimental_extra_library_files)',
- '--startup_blob', '<@(libraries_experimental_extras_bin_file)',
- '--nojs',
- ],
- },
- ],
- },
- {
- 'target_name': 'postmortem-metadata',
- 'type': 'none',
- 'variables': {
- 'heapobject_files': [
- 'objects.h',
- 'objects-inl.h',
- 'objects/code.h',
- 'objects/code-inl.h',
- 'objects/fixed-array.h',
- 'objects/fixed-array-inl.h',
- 'objects/js-array.h',
- 'objects/js-array-inl.h',
- 'objects/js-regexp.h',
- 'objects/js-regexp-inl.h',
- 'objects/map.h',
- 'objects/map-inl.h',
- 'objects/script.h',
- 'objects/script-inl.h',
- 'objects/shared-function-info.h',
- 'objects/shared-function-info-inl.h',
- 'objects/string.h',
- 'objects/string-inl.h',
- ],
- },
- 'actions': [
- {
- 'action_name': 'gen-postmortem-metadata',
- 'inputs': [
- '../tools/gen-postmortem-metadata.py',
- '<@(heapobject_files)',
- ],
- 'outputs': [
- '<(SHARED_INTERMEDIATE_DIR)/debug-support.cc',
- ],
- 'action': [
- 'python',
- '../tools/gen-postmortem-metadata.py',
- '<@(_outputs)',
- '<@(heapobject_files)'
- ]
- }
- ]
- },
- {
- 'target_name': 'mksnapshot',
- 'type': 'executable',
- 'dependencies': [
- 'v8_base',
- 'v8_init',
- 'v8_libbase',
- 'v8_libplatform',
- 'v8_nosnapshot',
- ],
- 'include_dirs+': [
- '..',
- '<(DEPTH)',
- ],
- 'sources': [
- 'snapshot/mksnapshot.cc',
- ],
- 'conditions': [
- ['v8_enable_i18n_support==1', {
- 'dependencies': [
- '<(icu_gyp_path):icui18n',
- '<(icu_gyp_path):icuuc',
- ]
- }],
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host'],
- }, {
- 'toolsets': ['target'],
- }],
- ],
- },
- {
- 'target_name': 'v8_dump_build_config',
- 'type': 'none',
- 'variables': {
- },
- 'actions': [
- {
- 'action_name': 'v8_dump_build_config',
- 'inputs': [
- '../tools/testrunner/utils/dump_build_config_gyp.py',
- ],
- 'outputs': [
- '<(PRODUCT_DIR)/v8_build_config.json',
- ],
- 'action': [
- 'python',
- '../tools/testrunner/utils/dump_build_config_gyp.py',
- '<(PRODUCT_DIR)/v8_build_config.json',
- 'dcheck_always_on=<(dcheck_always_on)',
- 'is_asan=<(asan)',
- 'is_cfi=<(cfi_vptr)',
- 'is_component_build=<(component)',
- 'is_debug=<(CONFIGURATION_NAME)',
- # Not available in gyp.
- 'is_gcov_coverage=0',
- 'is_msan=<(msan)',
- 'is_tsan=<(tsan)',
- # Not available in gyp.
- 'is_ubsan_vptr=0',
- 'target_cpu=<(target_arch)',
- 'v8_enable_i18n_support=<(v8_enable_i18n_support)',
- 'v8_enable_verify_predictable=<(v8_enable_verify_predictable)',
- 'v8_target_cpu=<(v8_target_arch)',
- 'v8_use_snapshot=<(v8_use_snapshot)',
- ],
- },
- ],
- },
- {
- 'target_name': 'v8_monolith',
- 'type': 'static_library',
- 'direct_dependent_settings': {
- 'include_dirs': [
- '../include',
- ],
- },
- 'actions': [
- {
- 'action_name': 'build_with_gn',
- 'inputs': [
- '../tools/node/build_gn.py',
- ],
- 'outputs': [
- '<(INTERMEDIATE_DIR)/obj/libv8_monolith.a',
- '<(INTERMEDIATE_DIR)/args.gn',
- ],
- 'action': [
- '../tools/node/build_gn.py',
- '<(CONFIGURATION_NAME)',
- '../',
- '<(INTERMEDIATE_DIR)',
- 'v8_promise_internal_field_count=<(v8_promise_internal_field_count)',
- 'target_cpu="<(target_arch)"',
- 'target_os="<(OS)"',
- 'v8_target_cpu="<(v8_target_arch)"',
- 'v8_embedder_string="<(v8_embedder_string)"',
- 'v8_use_snapshot=<(v8_use_snapshot)',
- 'v8_optimized_debug=<(v8_optimized_debug)',
- 'v8_enable_disassembler=<(v8_enable_disassembler)',
- 'v8_postmortem_support=<(v8_postmortem_support)',
- ],
- },
- ],
- },
- ],
-}
diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h
index db561b9918..bf4240fa70 100644
--- a/deps/v8/src/v8.h
+++ b/deps/v8/src/v8.h
@@ -5,10 +5,13 @@
#ifndef V8_V8_H_
#define V8_V8_H_
-#include "include/v8.h"
#include "src/globals.h"
namespace v8 {
+
+class Platform;
+class StartupData;
+
namespace internal {
class V8 : public AllStatic {
diff --git a/deps/v8/src/v8memory.h b/deps/v8/src/v8memory.h
index d34bce7746..528de5836c 100644
--- a/deps/v8/src/v8memory.h
+++ b/deps/v8/src/v8memory.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MEMORY_H_
-#define V8_MEMORY_H_
+#ifndef V8_V8MEMORY_H_
+#define V8_V8MEMORY_H_
namespace v8 {
namespace internal {
@@ -76,4 +76,4 @@ class Memory {
} // namespace internal
} // namespace v8
-#endif // V8_MEMORY_H_
+#endif // V8_V8MEMORY_H_
diff --git a/deps/v8/src/value-serializer.cc b/deps/v8/src/value-serializer.cc
index 5e2ab19877..30f6a7a729 100644
--- a/deps/v8/src/value-serializer.cc
+++ b/deps/v8/src/value-serializer.cc
@@ -18,7 +18,7 @@
#include "src/objects.h"
#include "src/snapshot/code-serializer.h"
#include "src/transitions.h"
-#include "src/wasm/module-compiler.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-result.h"
#include "src/wasm/wasm-serialization.h"
@@ -161,6 +161,8 @@ enum class ArrayBufferViewTag : uint8_t {
kUint32Array = 'D',
kFloat32Array = 'f',
kFloat64Array = 'F',
+ kBigInt64Array = 'q',
+ kBigUint64Array = 'Q',
kDataView = '?',
};
@@ -1084,13 +1086,13 @@ bool ValueDeserializer::ReadRawBytes(size_t length, const void** data) {
void ValueDeserializer::TransferArrayBuffer(
uint32_t transfer_id, Handle<JSArrayBuffer> array_buffer) {
if (array_buffer_transfer_map_.is_null()) {
- array_buffer_transfer_map_ =
- isolate_->global_handles()->Create(*NumberDictionary::New(isolate_, 0));
+ array_buffer_transfer_map_ = isolate_->global_handles()->Create(
+ *SimpleNumberDictionary::New(isolate_, 0));
}
- Handle<NumberDictionary> dictionary =
+ Handle<SimpleNumberDictionary> dictionary =
array_buffer_transfer_map_.ToHandleChecked();
- Handle<NumberDictionary> new_dictionary =
- NumberDictionary::Set(dictionary, transfer_id, array_buffer);
+ Handle<SimpleNumberDictionary> new_dictionary =
+ SimpleNumberDictionary::Set(dictionary, transfer_id, array_buffer);
if (!new_dictionary.is_identical_to(dictionary)) {
GlobalHandles::Destroy(Handle<Object>::cast(dictionary).location());
array_buffer_transfer_map_ =
@@ -1182,15 +1184,16 @@ MaybeHandle<Object> ValueDeserializer::ReadObjectInternal() {
return ReadJSMap();
case SerializationTag::kBeginJSSet:
return ReadJSSet();
- case SerializationTag::kArrayBuffer:
- return ReadJSArrayBuffer();
- case SerializationTag::kArrayBufferTransfer: {
+ case SerializationTag::kArrayBuffer: {
const bool is_shared = false;
- return ReadTransferredJSArrayBuffer(is_shared);
+ return ReadJSArrayBuffer(is_shared);
+ }
+ case SerializationTag::kArrayBufferTransfer: {
+ return ReadTransferredJSArrayBuffer();
}
case SerializationTag::kSharedArrayBuffer: {
const bool is_shared = true;
- return ReadTransferredJSArrayBuffer(is_shared);
+ return ReadJSArrayBuffer(is_shared);
}
case SerializationTag::kWasmModule:
return ReadWasmModule();
@@ -1572,8 +1575,25 @@ MaybeHandle<JSSet> ValueDeserializer::ReadJSSet() {
return scope.CloseAndEscape(set);
}
-MaybeHandle<JSArrayBuffer> ValueDeserializer::ReadJSArrayBuffer() {
+MaybeHandle<JSArrayBuffer> ValueDeserializer::ReadJSArrayBuffer(
+ bool is_shared) {
uint32_t id = next_id_++;
+ if (is_shared) {
+ uint32_t clone_id;
+ Local<SharedArrayBuffer> sab_value;
+ if (!ReadVarint<uint32_t>().To(&clone_id) || delegate_ == nullptr ||
+ !delegate_
+ ->GetSharedArrayBufferFromId(
+ reinterpret_cast<v8::Isolate*>(isolate_), clone_id)
+ .ToLocal(&sab_value)) {
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate_, JSArrayBuffer);
+ return MaybeHandle<JSArrayBuffer>();
+ }
+ Handle<JSArrayBuffer> array_buffer = Utils::OpenHandle(*sab_value);
+ DCHECK_EQ(is_shared, array_buffer->is_shared());
+ AddObjectWithID(id, array_buffer);
+ return array_buffer;
+ }
uint32_t byte_length;
if (!ReadVarint<uint32_t>().To(&byte_length) ||
byte_length > static_cast<size_t>(end_ - position_)) {
@@ -1592,22 +1612,20 @@ MaybeHandle<JSArrayBuffer> ValueDeserializer::ReadJSArrayBuffer() {
return array_buffer;
}
-MaybeHandle<JSArrayBuffer> ValueDeserializer::ReadTransferredJSArrayBuffer(
- bool is_shared) {
+MaybeHandle<JSArrayBuffer> ValueDeserializer::ReadTransferredJSArrayBuffer() {
uint32_t id = next_id_++;
uint32_t transfer_id;
- Handle<NumberDictionary> transfer_map;
+ Handle<SimpleNumberDictionary> transfer_map;
if (!ReadVarint<uint32_t>().To(&transfer_id) ||
!array_buffer_transfer_map_.ToHandle(&transfer_map)) {
return MaybeHandle<JSArrayBuffer>();
}
int index = transfer_map->FindEntry(isolate_, transfer_id);
- if (index == NumberDictionary::kNotFound) {
+ if (index == SimpleNumberDictionary::kNotFound) {
return MaybeHandle<JSArrayBuffer>();
}
Handle<JSArrayBuffer> array_buffer(
JSArrayBuffer::cast(transfer_map->ValueAt(index)), isolate_);
- DCHECK_EQ(is_shared, array_buffer->is_shared());
AddObjectWithID(id, array_buffer);
return array_buffer;
}
@@ -1628,6 +1646,16 @@ MaybeHandle<JSArrayBufferView> ValueDeserializer::ReadJSArrayBufferView(
uint32_t id = next_id_++;
ExternalArrayType external_array_type = kExternalInt8Array;
unsigned element_size = 0;
+
+ if (!FLAG_harmony_bigint) {
+ // Refuse to construct BigInt64Arrays unless the flag is on.
+ ArrayBufferViewTag cast_tag = static_cast<ArrayBufferViewTag>(tag);
+ if (cast_tag == ArrayBufferViewTag::kBigInt64Array ||
+ cast_tag == ArrayBufferViewTag::kBigUint64Array) {
+ return MaybeHandle<JSArrayBufferView>();
+ }
+ }
+
switch (static_cast<ArrayBufferViewTag>(tag)) {
case ArrayBufferViewTag::kDataView: {
Handle<JSDataView> data_view =
@@ -1714,8 +1742,8 @@ MaybeHandle<JSObject> ValueDeserializer::ReadWasmModule() {
}
if (result.is_null()) {
wasm::ErrorThrower thrower(isolate_, "ValueDeserializer::ReadWasmModule");
- result = wasm::SyncCompile(isolate_, &thrower,
- wasm::ModuleWireBytes(wire_bytes));
+ result = isolate_->wasm_engine()->SyncCompile(
+ isolate_, &thrower, wasm::ModuleWireBytes(wire_bytes));
}
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate_, JSObject);
uint32_t id = next_id_++;
@@ -1744,7 +1772,7 @@ MaybeHandle<WasmMemoryObject> ValueDeserializer::ReadWasmMemory() {
const bool is_shared = true;
Handle<JSArrayBuffer> buffer;
- if (!ReadTransferredJSArrayBuffer(is_shared).ToHandle(&buffer)) {
+ if (!ReadJSArrayBuffer(is_shared).ToHandle(&buffer)) {
return MaybeHandle<WasmMemoryObject>();
}
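
A minimal sketch, not part of this patch, of an embedder-side ValueDeserializer::Delegate whose GetSharedArrayBufferFromId() satisfies the lookup performed by ReadJSArrayBuffer(is_shared) above. It assumes the embedder kept the shared buffers it handed out during serialization; the class name and the surrounding storage are illustrative only.

#include <vector>
#include "v8.h"  // public embedder API

class SharedArrayBufferLookupDelegate : public v8::ValueDeserializer::Delegate {
 public:
  explicit SharedArrayBufferLookupDelegate(
      std::vector<v8::Global<v8::SharedArrayBuffer>>* buffers)
      : buffers_(buffers) {}

  v8::MaybeLocal<v8::SharedArrayBuffer> GetSharedArrayBufferFromId(
      v8::Isolate* isolate, uint32_t clone_id) override {
    // An unknown id leaves the MaybeLocal empty; the deserializer above then
    // returns an empty MaybeHandle, i.e. the read fails with an exception.
    if (clone_id >= buffers_->size()) {
      return v8::MaybeLocal<v8::SharedArrayBuffer>();
    }
    return (*buffers_)[clone_id].Get(isolate);
  }

 private:
  std::vector<v8::Global<v8::SharedArrayBuffer>>* buffers_;  // owned elsewhere
};
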
diff --git a/deps/v8/src/value-serializer.h b/deps/v8/src/value-serializer.h
index a272fa0945..f719eb8206 100644
--- a/deps/v8/src/value-serializer.h
+++ b/deps/v8/src/value-serializer.h
@@ -266,8 +266,9 @@ class ValueDeserializer {
MaybeHandle<JSRegExp> ReadJSRegExp() WARN_UNUSED_RESULT;
MaybeHandle<JSMap> ReadJSMap() WARN_UNUSED_RESULT;
MaybeHandle<JSSet> ReadJSSet() WARN_UNUSED_RESULT;
- MaybeHandle<JSArrayBuffer> ReadJSArrayBuffer() WARN_UNUSED_RESULT;
- MaybeHandle<JSArrayBuffer> ReadTransferredJSArrayBuffer(bool is_shared)
+ MaybeHandle<JSArrayBuffer> ReadJSArrayBuffer(bool is_shared)
+ WARN_UNUSED_RESULT;
+ MaybeHandle<JSArrayBuffer> ReadTransferredJSArrayBuffer()
WARN_UNUSED_RESULT;
MaybeHandle<JSArrayBufferView> ReadJSArrayBufferView(
Handle<JSArrayBuffer> buffer) WARN_UNUSED_RESULT;
@@ -300,7 +301,7 @@ class ValueDeserializer {
// Always global handles.
Handle<FixedArray> id_map_;
- MaybeHandle<NumberDictionary> array_buffer_transfer_map_;
+ MaybeHandle<SimpleNumberDictionary> array_buffer_transfer_map_;
DISALLOW_COPY_AND_ASSIGN(ValueDeserializer);
};
diff --git a/deps/v8/src/version.h b/deps/v8/src/version.h
index 20d9c71fe7..93b3eb902d 100644
--- a/deps/v8/src/version.h
+++ b/deps/v8/src/version.h
@@ -5,12 +5,16 @@
#ifndef V8_VERSION_H_
#define V8_VERSION_H_
+#include <cstdint>
+
#include "src/base/functional.h"
-#include "src/vector.h"
namespace v8 {
namespace internal {
+template <typename T>
+class Vector;
+
class Version {
public:
// Return the various version components.
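
The version.h hunk above swaps a heavy include for a forward declaration. A small sketch of that pattern, with hypothetical identifiers (VersionLike, GetString), assuming only signatures name the template:

// header.h: a declaration is enough as long as the header only names Vector<T>.
namespace v8 {
namespace internal {
template <typename T>
class Vector;  // forward declaration; no #include "src/vector.h" needed here

class VersionLike {
 public:
  static void GetString(Vector<char> str);  // incomplete type is fine in a declaration
};
}  // namespace internal
}  // namespace v8

// Only the .cc files that construct or dereference a Vector<char> must
// include the full definition.
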
diff --git a/deps/v8/src/visitors.cc b/deps/v8/src/visitors.cc
index 98911f1c28..a877fc30ef 100644
--- a/deps/v8/src/visitors.cc
+++ b/deps/v8/src/visitors.cc
@@ -9,16 +9,19 @@
namespace v8 {
namespace internal {
-#define DECLARE_TAG(ignore1, name, ignore2) name,
-const char* const
- VisitorSynchronization::kTags[VisitorSynchronization::kNumberOfSyncTags] = {
- ROOT_ID_LIST(DECLARE_TAG)};
-#undef DECLARE_TAG
-
-#define DECLARE_TAG(ignore1, ignore2, name) name,
-const char* const VisitorSynchronization::kTagNames
- [VisitorSynchronization::kNumberOfSyncTags] = {ROOT_ID_LIST(DECLARE_TAG)};
-#undef DECLARE_TAG
+const char* RootVisitor::RootName(Root root) {
+ switch (root) {
+#define ROOT_CASE(root_id, description) \
+ case Root::root_id: \
+ return description;
+ ROOT_ID_LIST(ROOT_CASE)
+#undef ROOT_CASE
+ case Root::kNumberOfRoots:
+ break;
+ }
+ UNREACHABLE();
+ return nullptr;
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/visitors.h b/deps/v8/src/visitors.h
index 7696df8faf..31ee0795d4 100644
--- a/deps/v8/src/visitors.h
+++ b/deps/v8/src/visitors.h
@@ -13,45 +13,42 @@ namespace internal {
class CodeDataContainer;
class Object;
-#define ROOT_ID_LIST(V) \
- V(kStringTable, "string_table", "(Internalized strings)") \
- V(kExternalStringsTable, "external_strings_table", "(External strings)") \
- V(kStrongRootList, "strong_root_list", "(Strong roots)") \
- V(kSmiRootList, "smi_root_list", "(Smi roots)") \
- V(kBootstrapper, "bootstrapper", "(Bootstrapper)") \
- V(kTop, "top", "(Isolate)") \
- V(kRelocatable, "relocatable", "(Relocatable)") \
- V(kDebug, "debug", "(Debugger)") \
- V(kCompilationCache, "compilationcache", "(Compilation cache)") \
- V(kHandleScope, "handlescope", "(Handle scope)") \
- V(kDispatchTable, "dispatchtable", "(Dispatch table)") \
- V(kBuiltins, "builtins", "(Builtins)") \
- V(kGlobalHandles, "globalhandles", "(Global handles)") \
- V(kEternalHandles, "eternalhandles", "(Eternal handles)") \
- V(kThreadManager, "threadmanager", "(Thread manager)") \
- V(kStrongRoots, "strong roots", "(Strong roots)") \
- V(kExtensions, "Extensions", "(Extensions)")
+#define ROOT_ID_LIST(V) \
+ V(kStringTable, "(Internalized strings)") \
+ V(kExternalStringsTable, "(External strings)") \
+ V(kStrongRootList, "(Strong roots)") \
+ V(kSmiRootList, "(Smi roots)") \
+ V(kBootstrapper, "(Bootstrapper)") \
+ V(kTop, "(Isolate)") \
+ V(kRelocatable, "(Relocatable)") \
+ V(kDebug, "(Debugger)") \
+ V(kCompilationCache, "(Compilation cache)") \
+ V(kHandleScope, "(Handle scope)") \
+ V(kDispatchTable, "(Dispatch table)") \
+ V(kBuiltins, "(Builtins)") \
+ V(kGlobalHandles, "(Global handles)") \
+ V(kEternalHandles, "(Eternal handles)") \
+ V(kThreadManager, "(Thread manager)") \
+ V(kStrongRoots, "(Strong roots)") \
+ V(kExtensions, "(Extensions)") \
+ V(kCodeFlusher, "(Code flusher)") \
+ V(kPartialSnapshotCache, "(Partial snapshot cache)") \
+ V(kWeakCollections, "(Weak collections)") \
+ V(kWrapperTracing, "(Wrapper tracing)") \
+ V(kUnknown, "(Unknown)")
class VisitorSynchronization : public AllStatic {
public:
-#define DECLARE_ENUM(enum_item, ignore1, ignore2) enum_item,
+#define DECLARE_ENUM(enum_item, ignore) enum_item,
enum SyncTag { ROOT_ID_LIST(DECLARE_ENUM) kNumberOfSyncTags };
#undef DECLARE_ENUM
-
- static const char* const kTags[kNumberOfSyncTags];
- static const char* const kTagNames[kNumberOfSyncTags];
};
enum class Root {
-#define DECLARE_ENUM(enum_item, ignore1, ignore2) enum_item,
+#define DECLARE_ENUM(enum_item, ignore) enum_item,
ROOT_ID_LIST(DECLARE_ENUM)
#undef DECLARE_ENUM
- // TODO(ulan): Merge with the ROOT_ID_LIST.
- kCodeFlusher,
- kPartialSnapshotCache,
- kWeakCollections,
- kWrapperTracing,
- kUnknown
+ kNumberOfRoots
};
// Abstract base class for visiting, and optionally modifying, the
@@ -62,11 +59,13 @@ class RootVisitor BASE_EMBEDDED {
// Visits a contiguous arrays of pointers in the half-open range
// [start, end). Any or all of the values may be modified on return.
- virtual void VisitRootPointers(Root root, Object** start, Object** end) = 0;
+ virtual void VisitRootPointers(Root root, const char* description,
+ Object** start, Object** end) = 0;
// Handy shorthand for visiting a single pointer.
- virtual void VisitRootPointer(Root root, Object** p) {
- VisitRootPointers(root, p, p + 1);
+ virtual void VisitRootPointer(Root root, const char* description,
+ Object** p) {
+ VisitRootPointers(root, description, p, p + 1);
}
// Intended for serialization/deserialization checking: insert, or
@@ -74,6 +73,8 @@ class RootVisitor BASE_EMBEDDED {
// Also used for marking up GC roots in heap snapshots.
// TODO(ulan): Remove this.
virtual void Synchronize(VisitorSynchronization::SyncTag tag) {}
+
+ static const char* RootName(Root root);
};
// Abstract base class for visiting, and optionally modifying, the
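
A minimal sketch, not taken from this patch, of a visitor written against the new VisitRootPointers() signature; it uses the description argument and the new RootName() helper purely for logging.

#include <cstdio>

class CountingRootVisitor : public RootVisitor {
 public:
  void VisitRootPointers(Root root, const char* description, Object** start,
                         Object** end) override {
    // RootName(root) names the root list; description, when the caller
    // provides one, identifies the concrete entry being visited.
    std::printf("%s %s: %d slot(s)\n", RootName(root),
                description ? description : "",
                static_cast<int>(end - start));
  }
};
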
diff --git a/deps/v8/src/wasm/OWNERS b/deps/v8/src/wasm/OWNERS
index e68fb0847d..2b6cc5c057 100644
--- a/deps/v8/src/wasm/OWNERS
+++ b/deps/v8/src/wasm/OWNERS
@@ -6,8 +6,6 @@ bradnelson@chromium.org
clemensh@chromium.org
gdeepti@chromium.org
eholk@chromium.org
-mtrofin@chromium.org
-rossberg@chromium.org
titzer@chromium.org
# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index 7f7993d34f..ef8893f005 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -2,86 +2,125 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_H_
+#ifndef V8_WASM_BASELINE_ARM_LIFTOFF_ASSEMBLER_ARM_H_
+#define V8_WASM_BASELINE_ARM_LIFTOFF_ASSEMBLER_ARM_H_
#include "src/wasm/baseline/liftoff-assembler.h"
+#define BAILOUT(reason) bailout("arm " reason)
+
namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
+uint32_t LiftoffAssembler::PrepareStackFrame() {
+ BAILOUT("PrepareStackFrame");
+ return 0;
+}
+
+void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+ uint32_t stack_slots) {
+ BAILOUT("PatchPrepareStackFrame");
+}
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
- UNIMPLEMENTED();
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
+ BAILOUT("LoadConstant");
}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {
- UNIMPLEMENTED();
+ BAILOUT("LoadFromContext");
}
-void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+void LiftoffAssembler::SpillContext(Register context) {
+ BAILOUT("SpillContext");
+}
-void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+void LiftoffAssembler::FillContextInto(Register dst) {
+ BAILOUT("FillContextInto");
+}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc) {
- UNIMPLEMENTED();
+ BAILOUT("Load");
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc) {
- UNIMPLEMENTED();
+ BAILOUT("Store");
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
- uint32_t caller_slot_idx) {
- UNIMPLEMENTED();
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ BAILOUT("LoadCallerFrameSlot");
}
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
+ ValueType type) {
+ BAILOUT("MoveStackValue");
}
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("MoveToReturnRegister");
}
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+ BAILOUT("Move Register");
}
-void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueType type) {
+ BAILOUT("Move DoubleRegister");
+}
+
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("Spill register");
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
- UNIMPLEMENTED();
+ BAILOUT("Spill value");
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
+ ValueType type) {
+ BAILOUT("Fill");
+}
+
+void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
+ BAILOUT("FillI64Half");
}
#define UNIMPLEMENTED_GP_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
- UNIMPLEMENTED(); \
+ BAILOUT("gp binop"); \
}
#define UNIMPLEMENTED_GP_UNOP(name) \
bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
- UNIMPLEMENTED(); \
+ BAILOUT("gp unop"); \
+ return true; \
}
#define UNIMPLEMENTED_FP_BINOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
- UNIMPLEMENTED(); \
+ BAILOUT("fp binop"); \
+ }
+#define UNIMPLEMENTED_FP_UNOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ BAILOUT("fp unop"); \
+ }
+#define UNIMPLEMENTED_SHIFTOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register lhs, Register rhs, \
+ LiftoffRegList pinned) { \
+ BAILOUT("shiftop"); \
}
UNIMPLEMENTED_GP_BINOP(i32_add)
@@ -90,10 +129,9 @@ UNIMPLEMENTED_GP_BINOP(i32_mul)
UNIMPLEMENTED_GP_BINOP(i32_and)
UNIMPLEMENTED_GP_BINOP(i32_or)
UNIMPLEMENTED_GP_BINOP(i32_xor)
-UNIMPLEMENTED_GP_BINOP(i32_shl)
-UNIMPLEMENTED_GP_BINOP(i32_sar)
-UNIMPLEMENTED_GP_BINOP(i32_shr)
-UNIMPLEMENTED_GP_UNOP(i32_eqz)
+UNIMPLEMENTED_SHIFTOP(i32_shl)
+UNIMPLEMENTED_SHIFTOP(i32_sar)
+UNIMPLEMENTED_SHIFTOP(i32_shr)
UNIMPLEMENTED_GP_UNOP(i32_clz)
UNIMPLEMENTED_GP_UNOP(i32_ctz)
UNIMPLEMENTED_GP_UNOP(i32_popcnt)
@@ -101,81 +139,115 @@ UNIMPLEMENTED_GP_BINOP(ptrsize_add)
UNIMPLEMENTED_FP_BINOP(f32_add)
UNIMPLEMENTED_FP_BINOP(f32_sub)
UNIMPLEMENTED_FP_BINOP(f32_mul)
+UNIMPLEMENTED_FP_UNOP(f32_neg)
+UNIMPLEMENTED_FP_BINOP(f64_add)
+UNIMPLEMENTED_FP_BINOP(f64_sub)
+UNIMPLEMENTED_FP_BINOP(f64_mul)
+UNIMPLEMENTED_FP_UNOP(f64_neg)
#undef UNIMPLEMENTED_GP_BINOP
#undef UNIMPLEMENTED_GP_UNOP
#undef UNIMPLEMENTED_FP_BINOP
+#undef UNIMPLEMENTED_FP_UNOP
+#undef UNIMPLEMENTED_SHIFTOP
-void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
-void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueType type, Register lhs,
+ Register rhs) {
+ BAILOUT("emit_cond_jump");
}
-void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
+ BAILOUT("emit_i32_set_cond");
+}
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("emit_f32_set_cond");
}
-void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
-void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallTrapCallbackForTesting() {
+ BAILOUT("CallTrapCallbackForTesting");
+}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- UNIMPLEMENTED();
+ BAILOUT("AssertUnreachable");
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
- uint32_t src_index) {
- UNIMPLEMENTED();
+ uint32_t src_index,
+ RegPairHalf half) {
+ BAILOUT("PushCallerFrameSlot");
}
-void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("PushCallerFrameSlot reg");
}
-void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
+ BAILOUT("PushRegisters");
+}
-void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
+ BAILOUT("PopRegisters");
+}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- UNIMPLEMENTED();
+ BAILOUT("DropStackSlotsAndRet");
}
void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
- UNIMPLEMENTED();
+ BAILOUT("PrepareCCall");
}
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallRegParamAddr");
}
void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallStackParamAddr");
}
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("CallC");
}
-void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallNativeWasmCode(Address addr) {
+ BAILOUT("CallNativeWasmCode");
+}
void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- UNIMPLEMENTED();
+ BAILOUT("CallRuntime");
+}
+
+void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ BAILOUT("CallIndirect");
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- UNIMPLEMENTED();
+ BAILOUT("AllocateStackSlot");
}
-void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
+ BAILOUT("DeallocateStackSlot");
+}
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_H_
+#undef BAILOUT
+
+#endif // V8_WASM_BASELINE_ARM_LIFTOFF_ASSEMBLER_ARM_H_
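
The BAILOUT macro above relies on a bailout(reason) helper on the assembler. A rough sketch of that pattern, not the actual Liftoff implementation: record the first unsupported operation instead of aborting, so the caller can fall back to another compilation tier.

class BaselineAssemblerSketch {
 public:
  void bailout(const char* reason) {
    // Remember only the first reason; later bailouts are consequences of it.
    if (bailout_reason_ == nullptr) bailout_reason_ = reason;
  }
  bool did_bailout() const { return bailout_reason_ != nullptr; }
  const char* bailout_reason() const { return bailout_reason_; }

 private:
  const char* bailout_reason_ = nullptr;
};
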
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 8d28c2b21c..09bce6d450 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -2,86 +2,125 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_H_
+#ifndef V8_WASM_BASELINE_ARM64_LIFTOFF_ASSEMBLER_ARM64_H_
+#define V8_WASM_BASELINE_ARM64_LIFTOFF_ASSEMBLER_ARM64_H_
#include "src/wasm/baseline/liftoff-assembler.h"
+#define BAILOUT(reason) bailout("arm64 " reason)
+
namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
+uint32_t LiftoffAssembler::PrepareStackFrame() {
+ BAILOUT("PrepareStackFrame");
+ return 0;
+}
+
+void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+ uint32_t stack_slots) {
+ BAILOUT("PatchPrepareStackFrame");
+}
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
- UNIMPLEMENTED();
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
+ BAILOUT("LoadConstant");
}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {
- UNIMPLEMENTED();
+ BAILOUT("LoadFromContext");
}
-void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+void LiftoffAssembler::SpillContext(Register context) {
+ BAILOUT("SpillContext");
+}
-void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+void LiftoffAssembler::FillContextInto(Register dst) {
+ BAILOUT("FillContextInto");
+}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc) {
- UNIMPLEMENTED();
+ BAILOUT("Load");
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc) {
- UNIMPLEMENTED();
+ BAILOUT("Store");
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
- uint32_t caller_slot_idx) {
- UNIMPLEMENTED();
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ BAILOUT("LoadCallerFrameSlot");
}
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
+ ValueType type) {
+ BAILOUT("MoveStackValue");
}
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("MoveToReturnRegister");
}
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+ BAILOUT("Move Register");
}
-void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueType type) {
+ BAILOUT("Move DoubleRegister");
+}
+
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("Spill register");
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
- UNIMPLEMENTED();
+ BAILOUT("Spill value");
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
+ ValueType type) {
+ BAILOUT("Fill");
+}
+
+void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
+ BAILOUT("FillI64Half");
}
#define UNIMPLEMENTED_GP_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
- UNIMPLEMENTED(); \
+ BAILOUT("gp binop"); \
}
#define UNIMPLEMENTED_GP_UNOP(name) \
bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
- UNIMPLEMENTED(); \
+ BAILOUT("gp unop"); \
+ return true; \
}
#define UNIMPLEMENTED_FP_BINOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
- UNIMPLEMENTED(); \
+ BAILOUT("fp binop"); \
+ }
+#define UNIMPLEMENTED_FP_UNOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ BAILOUT("fp unop"); \
+ }
+#define UNIMPLEMENTED_SHIFTOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register lhs, Register rhs, \
+ LiftoffRegList pinned) { \
+ BAILOUT("shiftop"); \
}
UNIMPLEMENTED_GP_BINOP(i32_add)
@@ -90,10 +129,9 @@ UNIMPLEMENTED_GP_BINOP(i32_mul)
UNIMPLEMENTED_GP_BINOP(i32_and)
UNIMPLEMENTED_GP_BINOP(i32_or)
UNIMPLEMENTED_GP_BINOP(i32_xor)
-UNIMPLEMENTED_GP_BINOP(i32_shl)
-UNIMPLEMENTED_GP_BINOP(i32_sar)
-UNIMPLEMENTED_GP_BINOP(i32_shr)
-UNIMPLEMENTED_GP_UNOP(i32_eqz)
+UNIMPLEMENTED_SHIFTOP(i32_shl)
+UNIMPLEMENTED_SHIFTOP(i32_sar)
+UNIMPLEMENTED_SHIFTOP(i32_shr)
UNIMPLEMENTED_GP_UNOP(i32_clz)
UNIMPLEMENTED_GP_UNOP(i32_ctz)
UNIMPLEMENTED_GP_UNOP(i32_popcnt)
@@ -101,81 +139,115 @@ UNIMPLEMENTED_GP_BINOP(ptrsize_add)
UNIMPLEMENTED_FP_BINOP(f32_add)
UNIMPLEMENTED_FP_BINOP(f32_sub)
UNIMPLEMENTED_FP_BINOP(f32_mul)
+UNIMPLEMENTED_FP_UNOP(f32_neg)
+UNIMPLEMENTED_FP_BINOP(f64_add)
+UNIMPLEMENTED_FP_BINOP(f64_sub)
+UNIMPLEMENTED_FP_BINOP(f64_mul)
+UNIMPLEMENTED_FP_UNOP(f64_neg)
#undef UNIMPLEMENTED_GP_BINOP
#undef UNIMPLEMENTED_GP_UNOP
#undef UNIMPLEMENTED_FP_BINOP
+#undef UNIMPLEMENTED_FP_UNOP
+#undef UNIMPLEMENTED_SHIFTOP
-void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
-void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueType type, Register lhs,
+ Register rhs) {
+ BAILOUT("emit_cond_jump");
}
-void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
+ BAILOUT("emit_i32_set_cond");
+}
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("emit_f32_set_cond");
}
-void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
-void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallTrapCallbackForTesting() {
+ BAILOUT("CallTrapCallbackForTesting");
+}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- UNIMPLEMENTED();
+ BAILOUT("AssertUnreachable");
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
- uint32_t src_index) {
- UNIMPLEMENTED();
+ uint32_t src_index,
+ RegPairHalf half) {
+ BAILOUT("PushCallerFrameSlot");
}
-void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("PushCallerFrameSlot reg");
}
-void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
+ BAILOUT("PushRegisters");
+}
-void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
+ BAILOUT("PopRegisters");
+}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- UNIMPLEMENTED();
+ BAILOUT("DropStackSlotsAndRet");
}
void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
- UNIMPLEMENTED();
+ BAILOUT("PrepareCCall");
}
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallRegParamAddr");
}
void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallStackParamAddr");
}
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("CallC");
}
-void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallNativeWasmCode(Address addr) {
+ BAILOUT("CallNativeWasmCode");
+}
void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- UNIMPLEMENTED();
+ BAILOUT("CallRuntime");
+}
+
+void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ BAILOUT("CallIndirect");
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- UNIMPLEMENTED();
+ BAILOUT("AllocateStackSlot");
}
-void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
+ BAILOUT("DeallocateStackSlot");
+}
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_H_
+#undef BAILOUT
+
+#endif // V8_WASM_BASELINE_ARM64_LIFTOFF_ASSEMBLER_ARM64_H_
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index a8b5b32bdc..35943554cc 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_H_
+#ifndef V8_WASM_BASELINE_IA32_LIFTOFF_ASSEMBLER_IA32_H_
+#define V8_WASM_BASELINE_IA32_LIFTOFF_ASSEMBLER_IA32_H_
#include "src/wasm/baseline/liftoff-assembler.h"
@@ -16,12 +16,20 @@ namespace wasm {
namespace liftoff {
+// ebp-8 holds the stack marker, ebp-16 is the wasm context, and the first
+// stack slot is located at ebp-24.
+constexpr int32_t kConstantStackSpace = 16;
+constexpr int32_t kFirstStackSlotOffset =
+ kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
+
inline Operand GetStackSlot(uint32_t index) {
- // ebp-8 holds the stack marker, ebp-16 is the wasm context, first stack slot
- // is located at ebp-24.
- constexpr int32_t kFirstStackSlotOffset = -24;
- return Operand(
- ebp, kFirstStackSlotOffset - index * LiftoffAssembler::kStackSlotSize);
+ int32_t offset = index * LiftoffAssembler::kStackSlotSize;
+ return Operand(ebp, -kFirstStackSlotOffset - offset);
+}
+
+inline Operand GetHalfStackSlot(uint32_t half_index) {
+ int32_t offset = half_index * (LiftoffAssembler::kStackSlotSize / 2);
+ return Operand(ebp, -kFirstStackSlotOffset - offset);
}
// TODO(clemensh): Make this a constexpr variable once Operand is constexpr.
@@ -41,26 +49,45 @@ static constexpr Register kCCallLastArgAddrReg = eax;
static constexpr DoubleRegister kScratchDoubleReg = xmm7;
-void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) {
+uint32_t LiftoffAssembler::PrepareStackFrame() {
+ uint32_t offset = static_cast<uint32_t>(pc_offset());
+ sub_sp_32(0);
+ return offset;
+}
+
+void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+ uint32_t stack_slots) {
+ uint32_t bytes = liftoff::kConstantStackSpace + kStackSlotSize * stack_slots;
DCHECK_LE(bytes, kMaxInt);
- sub(esp, Immediate(bytes));
+  // We can't run out of space; just pass anything big enough to keep the
+  // assembler from trying to grow the buffer.
+ constexpr int kAvailableSpace = 64;
+ Assembler patching_assembler(isolate(), buffer_ + offset, kAvailableSpace);
+ patching_assembler.sub_sp_32(bytes);
}
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
switch (value.type()) {
case kWasmI32:
- if (value.to_i32() == 0) {
- xor_(reg.gp(), reg.gp());
- } else {
- mov(reg.gp(), Immediate(value.to_i32()));
- }
+ TurboAssembler::Move(
+ reg.gp(),
+ Immediate(reinterpret_cast<Address>(value.to_i32()), rmode));
break;
- case kWasmF32: {
- Register tmp = GetUnusedRegister(kGpReg).gp();
- mov(tmp, Immediate(value.to_f32_boxed().get_bits()));
- movd(reg.fp(), tmp);
+ case kWasmI64: {
+ DCHECK(RelocInfo::IsNone(rmode));
+ int32_t low_word = value.to_i64();
+ int32_t high_word = value.to_i64() >> 32;
+ TurboAssembler::Move(reg.low_gp(), Immediate(low_word));
+ TurboAssembler::Move(reg.high_gp(), Immediate(high_word));
break;
}
+ case kWasmF32:
+ TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
+ break;
+ case kWasmF64:
+ TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
+ break;
default:
UNREACHABLE();
}
@@ -86,20 +113,16 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc) {
+ DCHECK_EQ(type.value_type() == kWasmI64, dst.is_pair());
+ // Wasm memory is limited to a size <2GB, so all offsets can be encoded as
+  // an immediate value (in 31 bits, interpreted as a signed value).
+ // If the offset is bigger, we always trap and this code is not reached.
+ DCHECK(is_uint31(offset_imm));
Operand src_op = offset_reg == no_reg
? Operand(src_addr, offset_imm)
: Operand(src_addr, offset_reg, times_1, offset_imm);
- if (offset_imm > kMaxInt) {
- // The immediate can not be encoded in the operand. Load it to a register
- // first.
- Register src = GetUnusedRegister(kGpReg, pinned).gp();
- mov(src, Immediate(offset_imm));
- if (offset_reg != no_reg) {
- emit_ptrsize_add(src, src, offset_reg);
- }
- src_op = Operand(src_addr, src, times_1, 0);
- }
if (protected_load_pc) *protected_load_pc = pc_offset();
+
switch (type.value()) {
case LoadType::kI32Load8U:
movzx_b(dst.gp(), src_op);
@@ -107,18 +130,61 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
case LoadType::kI32Load8S:
movsx_b(dst.gp(), src_op);
break;
+ case LoadType::kI64Load8U:
+ movzx_b(dst.low_gp(), src_op);
+ xor_(dst.high_gp(), dst.high_gp());
+ break;
+ case LoadType::kI64Load8S:
+ movsx_b(dst.low_gp(), src_op);
+ mov(dst.high_gp(), dst.low_gp());
+ sar(dst.high_gp(), 31);
+ break;
case LoadType::kI32Load16U:
movzx_w(dst.gp(), src_op);
break;
case LoadType::kI32Load16S:
movsx_w(dst.gp(), src_op);
break;
+ case LoadType::kI64Load16U:
+ movzx_w(dst.low_gp(), src_op);
+ xor_(dst.high_gp(), dst.high_gp());
+ break;
+ case LoadType::kI64Load16S:
+ movsx_w(dst.low_gp(), src_op);
+ mov(dst.high_gp(), dst.low_gp());
+ sar(dst.high_gp(), 31);
+ break;
case LoadType::kI32Load:
mov(dst.gp(), src_op);
break;
+ case LoadType::kI64Load32U:
+ mov(dst.low_gp(), src_op);
+ xor_(dst.high_gp(), dst.high_gp());
+ break;
+ case LoadType::kI64Load32S:
+ mov(dst.low_gp(), src_op);
+ mov(dst.high_gp(), dst.low_gp());
+ sar(dst.high_gp(), 31);
+ break;
+ case LoadType::kI64Load: {
+ // Compute the operand for the load of the upper half.
+ DCHECK(is_uint31(offset_imm + 4));
+ Operand upper_src_op =
+ offset_reg == no_reg
+ ? Operand(src_addr, offset_imm + 4)
+ : Operand(src_addr, offset_reg, times_1, offset_imm + 4);
+      // The high word has to be mov'ed first, so that it is the protected
+ // instruction. The mov of the low word cannot segfault.
+ mov(dst.high_gp(), upper_src_op);
+ mov(dst.low_gp(), src_op);
+ break;
+ }
case LoadType::kF32Load:
movss(dst.fp(), src_op);
break;
+ case LoadType::kF64Load:
+ movsd(dst.fp(), src_op);
+ break;
default:
UNREACHABLE();
}
@@ -128,21 +194,20 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc) {
+ DCHECK_EQ(type.value_type() == kWasmI64, src.is_pair());
+ // Wasm memory is limited to a size <2GB, so all offsets can be encoded as
+  // an immediate value (in 31 bits, interpreted as a signed value).
+ // If the offset is bigger, we always trap and this code is not reached.
+ DCHECK(is_uint31(offset_imm));
Operand dst_op = offset_reg == no_reg
? Operand(dst_addr, offset_imm)
: Operand(dst_addr, offset_reg, times_1, offset_imm);
- if (offset_imm > kMaxInt) {
- // The immediate can not be encoded in the operand. Load it to a register
- // first.
- Register dst = pinned.set(GetUnusedRegister(kGpReg, pinned).gp());
- mov(dst, Immediate(offset_imm));
- if (offset_reg != no_reg) {
- emit_ptrsize_add(dst, dst, offset_reg);
- }
- dst_op = Operand(dst_addr, dst, times_1, 0);
- }
if (protected_store_pc) *protected_store_pc = pc_offset();
+
switch (type.value()) {
+ case StoreType::kI64Store8:
+ src = src.low();
+ V8_FALLTHROUGH;
case StoreType::kI32Store8:
// Only the lower 4 registers can be addressed as 8-bit registers.
if (src.gp().is_byte_register()) {
@@ -153,80 +218,139 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
mov_b(dst_op, byte_src);
}
break;
+ case StoreType::kI64Store16:
+ src = src.low();
+ V8_FALLTHROUGH;
case StoreType::kI32Store16:
mov_w(dst_op, src.gp());
break;
+ case StoreType::kI64Store32:
+ src = src.low();
+ V8_FALLTHROUGH;
case StoreType::kI32Store:
mov(dst_op, src.gp());
break;
+ case StoreType::kI64Store: {
+ // Compute the operand for the store of the upper half.
+ DCHECK(is_uint31(offset_imm + 4));
+ Operand upper_dst_op =
+ offset_reg == no_reg
+ ? Operand(dst_addr, offset_imm + 4)
+ : Operand(dst_addr, offset_reg, times_1, offset_imm + 4);
+      // The high word has to be mov'ed first, so that it is the protected
+ // instruction. The mov of the low word cannot segfault.
+ mov(upper_dst_op, src.high_gp());
+ mov(dst_op, src.low_gp());
+ break;
+ }
case StoreType::kF32Store:
movss(dst_op, src.fp());
break;
+ case StoreType::kF64Store:
+ movsd(dst_op, src.fp());
+ break;
default:
UNREACHABLE();
}
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
- uint32_t caller_slot_idx) {
+ uint32_t caller_slot_idx,
+ ValueType type) {
Operand src(ebp, kPointerSize * (caller_slot_idx + 1));
- if (dst.is_gp()) {
- mov(dst.gp(), src);
- } else {
- movss(dst.fp(), src);
+ switch (type) {
+ case kWasmI32:
+ mov(dst.gp(), src);
+ break;
+ case kWasmF32:
+ movss(dst.fp(), src);
+ break;
+ case kWasmF64:
+ movsd(dst.fp(), src);
+ break;
+ default:
+ UNREACHABLE();
}
}
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
+ ValueType type) {
DCHECK_NE(dst_index, src_index);
if (cache_state_.has_unused_register(kGpReg)) {
LiftoffRegister reg = GetUnusedRegister(kGpReg);
- Fill(reg, src_index);
- Spill(dst_index, reg);
+ Fill(reg, src_index, type);
+ Spill(dst_index, reg, type);
} else {
push(liftoff::GetStackSlot(src_index));
pop(liftoff::GetStackSlot(dst_index));
}
}
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+ ValueType type) {
// TODO(wasm): Extract the destination register from the CallDescriptor.
// TODO(wasm): Add multi-return support.
LiftoffRegister dst =
- reg.is_gp() ? LiftoffRegister(eax) : LiftoffRegister(xmm1);
- if (reg != dst) Move(dst, reg);
+ reg.is_pair()
+ ? LiftoffRegister::ForPair(LiftoffRegister(eax), LiftoffRegister(edx))
+ : reg.is_gp() ? LiftoffRegister(eax) : LiftoffRegister(xmm1);
+ if (reg != dst) Move(dst, reg, type);
}
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
- // The caller should check that the registers are not equal. For most
- // occurences, this is already guaranteed, so no need to check within this
- // method.
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
DCHECK_NE(dst, src);
- DCHECK_EQ(dst.reg_class(), src.reg_class());
- // TODO(clemensh): Handle different sizes here.
- if (dst.is_gp()) {
- mov(dst.gp(), src.gp());
+ DCHECK_EQ(kWasmI32, type);
+ mov(dst, src);
+}
+
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueType type) {
+ DCHECK_NE(dst, src);
+ if (type == kWasmF32) {
+ movss(dst, src);
} else {
- movsd(dst.fp(), src.fp());
+ DCHECK_EQ(kWasmF64, type);
+ movsd(dst, src);
}
}
-void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
+ ValueType type) {
+ RecordUsedSpillSlot(index);
Operand dst = liftoff::GetStackSlot(index);
- // TODO(clemensh): Handle different sizes here.
- if (reg.is_gp()) {
- mov(dst, reg.gp());
- } else {
- movsd(dst, reg.fp());
+ switch (type) {
+ case kWasmI32:
+ mov(dst, reg.gp());
+ break;
+ case kWasmI64:
+ mov(dst, reg.low_gp());
+ mov(liftoff::GetHalfStackSlot(2 * index + 1), reg.high_gp());
+ break;
+ case kWasmF32:
+ movss(dst, reg.fp());
+ break;
+ case kWasmF64:
+ movsd(dst, reg.fp());
+ break;
+ default:
+ UNREACHABLE();
}
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
+ RecordUsedSpillSlot(index);
Operand dst = liftoff::GetStackSlot(index);
switch (value.type()) {
case kWasmI32:
mov(dst, Immediate(value.to_i32()));
break;
+ case kWasmI64: {
+ int32_t low_word = value.to_i64();
+ int32_t high_word = value.to_i64() >> 32;
+ mov(dst, Immediate(low_word));
+ mov(liftoff::GetHalfStackSlot(2 * index + 1), Immediate(high_word));
+ break;
+ }
case kWasmF32:
mov(dst, Immediate(value.to_f32_boxed().get_bits()));
break;
@@ -235,16 +359,32 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
}
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
+ ValueType type) {
Operand src = liftoff::GetStackSlot(index);
- // TODO(clemensh): Handle different sizes here.
- if (reg.is_gp()) {
- mov(reg.gp(), src);
- } else {
- movsd(reg.fp(), src);
+ switch (type) {
+ case kWasmI32:
+ mov(reg.gp(), src);
+ break;
+ case kWasmI64:
+ mov(reg.low_gp(), src);
+ mov(reg.high_gp(), liftoff::GetHalfStackSlot(2 * index + 1));
+ break;
+ case kWasmF32:
+ movss(reg.fp(), src);
+ break;
+ case kWasmF64:
+ movsd(reg.fp(), src);
+ break;
+ default:
+ UNREACHABLE();
}
}
+void LiftoffAssembler::FillI64Half(Register reg, uint32_t half_index) {
+ mov(reg, liftoff::GetHalfStackSlot(half_index));
+}
+
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
if (lhs != dst) {
lea(dst, Operand(lhs, rhs, times_1, 0));
@@ -286,8 +426,11 @@ COMMUTATIVE_I32_BINOP(xor, xor_)
namespace liftoff {
inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
Register lhs, Register rhs,
- void (Assembler::*emit_shift)(Register)) {
- LiftoffRegList pinned = LiftoffRegList::ForRegs(dst, lhs, rhs);
+ void (Assembler::*emit_shift)(Register),
+ LiftoffRegList pinned) {
+ pinned.set(dst);
+ pinned.set(lhs);
+ pinned.set(rhs);
// If dst is ecx, compute into a tmp register first, then move to ecx.
if (dst == ecx) {
Register tmp = assm->GetUnusedRegister(kGpReg, pinned).gp();
@@ -302,7 +445,8 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
// first. If lhs is ecx, lhs is now the tmp register.
Register tmp_reg = no_reg;
if (rhs != ecx) {
- if (lhs == ecx || assm->cache_state()->is_used(LiftoffRegister(ecx))) {
+ if (assm->cache_state()->is_used(LiftoffRegister(ecx)) ||
+ pinned.has(LiftoffRegister(ecx))) {
tmp_reg = assm->GetUnusedRegister(kGpReg, pinned).gp();
assm->mov(tmp_reg, ecx);
if (lhs == ecx) lhs = tmp_reg;
@@ -319,30 +463,19 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
}
} // namespace liftoff
-void LiftoffAssembler::emit_i32_shl(Register dst, Register lhs, Register rhs) {
- liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shl_cl);
+void LiftoffAssembler::emit_i32_shl(Register dst, Register lhs, Register rhs,
+ LiftoffRegList pinned) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shl_cl, pinned);
}
-void LiftoffAssembler::emit_i32_sar(Register dst, Register lhs, Register rhs) {
- liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::sar_cl);
+void LiftoffAssembler::emit_i32_sar(Register dst, Register lhs, Register rhs,
+ LiftoffRegList pinned) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::sar_cl, pinned);
}
-void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, Register rhs) {
- liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shr_cl);
-}
-
-bool LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
- Register tmp_byte_reg = dst;
- // Only the lower 4 registers can be addressed as 8-bit registers.
- if (!dst.is_byte_register()) {
- LiftoffRegList pinned = LiftoffRegList::ForRegs(src);
- tmp_byte_reg = GetUnusedRegister(liftoff::kByteRegs, pinned).gp();
- }
-
- test(src, src);
- setcc(zero, tmp_byte_reg);
- movzx_b(dst, tmp_byte_reg);
- return true;
+void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, Register rhs,
+ LiftoffRegList pinned) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shr_cl, pinned);
}
bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
@@ -432,22 +565,141 @@ void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
}
}
-void LiftoffAssembler::emit_i32_test(Register reg) { test(reg, reg); }
+void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
+ static constexpr uint32_t kSignBit = uint32_t{1} << 31;
+ if (dst == src) {
+ TurboAssembler::Move(kScratchDoubleReg, kSignBit);
+ Xorps(dst, kScratchDoubleReg);
+ } else {
+ TurboAssembler::Move(dst, kSignBit);
+ Xorps(dst, src);
+ }
+}
+
+void LiftoffAssembler::emit_f64_add(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vaddsd(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ addsd(dst, lhs);
+ } else {
+ if (dst != lhs) movsd(dst, lhs);
+ addsd(dst, rhs);
+ }
+}
-void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
- cmp(lhs, rhs);
+void LiftoffAssembler::emit_f64_sub(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vsubsd(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ movsd(kScratchDoubleReg, rhs);
+ movsd(dst, lhs);
+ subsd(dst, kScratchDoubleReg);
+ } else {
+ if (dst != lhs) movsd(dst, lhs);
+ subsd(dst, rhs);
+ }
+}
+
+void LiftoffAssembler::emit_f64_mul(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmulsd(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ mulsd(dst, lhs);
+ } else {
+ if (dst != lhs) movsd(dst, lhs);
+ mulsd(dst, rhs);
+ }
+}
+
+void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
+ static constexpr uint64_t kSignBit = uint64_t{1} << 63;
+ if (dst == src) {
+ TurboAssembler::Move(kScratchDoubleReg, kSignBit);
+ Xorpd(dst, kScratchDoubleReg);
+ } else {
+ TurboAssembler::Move(dst, kSignBit);
+ Xorpd(dst, src);
+ }
}
void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueType type, Register lhs,
+ Register rhs) {
+ if (rhs != no_reg) {
+ switch (type) {
+ case kWasmI32:
+ cmp(lhs, rhs);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ DCHECK_EQ(type, kWasmI32);
+ test(lhs, lhs);
+ }
+
j(cond, label);
}
+namespace liftoff {
+inline void setcc_32(LiftoffAssembler* assm, Condition cond, Register dst) {
+ Register tmp_byte_reg = dst;
+ // Only the lower 4 registers can be addressed as 8-bit registers.
+ if (!dst.is_byte_register()) {
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(dst);
+ // {GetUnusedRegister()} may insert move instructions to spill registers to
+ // the stack. This is OK because {mov} does not change the status flags.
+ tmp_byte_reg = assm->GetUnusedRegister(liftoff::kByteRegs, pinned).gp();
+ }
+
+ assm->setcc(cond, tmp_byte_reg);
+ assm->movzx_b(dst, tmp_byte_reg);
+}
+} // namespace liftoff
+
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
+ if (rhs != no_reg) {
+ cmp(lhs, rhs);
+ } else {
+ test(lhs, lhs);
+ }
+ liftoff::setcc_32(this, cond, dst);
+}
+
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Label cont;
+ Label not_nan;
+
+ ucomiss(lhs, rhs);
+  // If PF is one, one of the operands was NaN. This needs special handling.
+ j(parity_odd, &not_nan, Label::kNear);
+ // Return 1 for f32.ne, 0 for all other cases.
+ if (cond == not_equal) {
+ mov(dst, Immediate(1));
+ } else {
+ xor_(dst, dst);
+ }
+ jmp(&cont, Label::kNear);
+ bind(&not_nan);
+
+ liftoff::setcc_32(this, cond, dst);
+ bind(&cont);
+}
+
void LiftoffAssembler::StackCheck(Label* ool_code) {
- Register limit = GetUnusedRegister(kGpReg).gp();
- mov(limit, Immediate(ExternalReference::address_of_stack_limit(isolate())));
- cmp(esp, Operand(limit, 0));
+ cmp(esp,
+ Operand(Immediate(ExternalReference::address_of_stack_limit(isolate()))));
j(below_equal, ool_code);
}
@@ -462,27 +714,50 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
- uint32_t src_index) {
+ uint32_t src_index,
+ RegPairHalf half) {
switch (src.loc()) {
case VarState::kStack:
- DCHECK_NE(kWasmF64, src.type()); // TODO(clemensh): Implement this.
- push(liftoff::GetStackSlot(src_index));
+ if (src.type() == kWasmF64) {
+ DCHECK_EQ(kLowWord, half);
+ push(liftoff::GetHalfStackSlot(2 * src_index - 1));
+ }
+ push(liftoff::GetHalfStackSlot(2 * src_index +
+ (half == kLowWord ? 0 : 1)));
break;
case VarState::kRegister:
- PushCallerFrameSlot(src.reg());
+ if (src.type() == kWasmI64) {
+ PushCallerFrameSlot(
+ half == kLowWord ? src.reg().low() : src.reg().high(), kWasmI32);
+ } else {
+ PushCallerFrameSlot(src.reg(), src.type());
+ }
break;
- case VarState::kI32Const:
- push(Immediate(src.i32_const()));
+ case VarState::KIntConst:
+ // The high word is the sign extension of the low word.
+ push(Immediate(half == kLowWord ? src.i32_const()
+ : src.i32_const() >> 31));
break;
}
}
-void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
- if (reg.is_gp()) {
- push(reg.gp());
- } else {
- sub(esp, Immediate(kPointerSize));
- movss(Operand(esp, 0), reg.fp());
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
+ ValueType type) {
+ switch (type) {
+ case kWasmI32:
+ push(reg.gp());
+ break;
+ case kWasmF32:
+ sub(esp, Immediate(sizeof(float)));
+ movss(Operand(esp, 0), reg.fp());
+ break;
+ case kWasmF64:
+ sub(esp, Immediate(sizeof(double)));
+ movsd(Operand(esp, 0), reg.fp());
+ break;
+ default:
+      // Also kWasmI64 is unreachable, as it will always be pushed as two halves.
+ UNREACHABLE();
}
}
@@ -571,6 +846,17 @@ void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
CallRuntimeDelayed(zone, fid);
}
+void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ if (target == no_reg) {
+ add(esp, Immediate(kPointerSize));
+ call(Operand(esp, -4));
+ } else {
+ call(target);
+ }
+}
+
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
sub(esp, Immediate(size));
mov(addr, esp);
@@ -584,4 +870,4 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
} // namespace internal
} // namespace v8
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_H_
+#endif // V8_WASM_BASELINE_IA32_LIFTOFF_ASSEMBLER_IA32_H_
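
A small standalone sketch of the i64-on-ia32 lowering used throughout the hunk above: a 64-bit value is kept as two 32-bit words, and sign-extending loads derive the high word from the low one with a shift by 31, mirroring the sar by 31 emitted above. It assumes arithmetic right shift of negative values, as the generated code does.

#include <cstdint>

struct I64Pair {
  int32_t low;
  int32_t high;
};

// kI64Load32S analogue: widen a signed 32-bit value to a register pair.
inline I64Pair SignExtendToI64(int32_t low_word) {
  I64Pair pair;
  pair.low = low_word;
  pair.high = low_word >> 31;  // arithmetic shift: 0 or -1
  return pair;
}

// LoadConstant(kWasmI64) analogue: split a 64-bit constant into two words.
inline I64Pair SplitI64(int64_t value) {
  I64Pair pair;
  pair.low = static_cast<int32_t>(value);
  pair.high = static_cast<int32_t>(value >> 32);
  return pair;
}
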
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
index 3eef1e1960..26f59c68be 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
@@ -11,6 +11,10 @@
#include "src/ia32/assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "src/x64/assembler-x64.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "src/mips/assembler-mips.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/assembler-mips64.h"
#endif
namespace v8 {
@@ -19,8 +23,6 @@ namespace wasm {
#if V8_TARGET_ARCH_IA32
-constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
-
constexpr RegList kLiftoffAssemblerGpCacheRegs =
Register::ListOf<eax, ecx, edx, ebx, esi, edi>();
@@ -30,17 +32,31 @@ constexpr RegList kLiftoffAssemblerFpCacheRegs =
#elif V8_TARGET_ARCH_X64
-constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
-
constexpr RegList kLiftoffAssemblerGpCacheRegs =
Register::ListOf<rax, rcx, rdx, rbx, rsi, rdi>();
constexpr RegList kLiftoffAssemblerFpCacheRegs =
DoubleRegister::ListOf<xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7>();
-#else
+#elif V8_TARGET_ARCH_MIPS
+
+constexpr RegList kLiftoffAssemblerGpCacheRegs =
+ Register::ListOf<a0, a1, a2, a3, t0, t1, t2, t3, t4, t5, t6, s7, v0, v1>();
-constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
+constexpr RegList kLiftoffAssemblerFpCacheRegs =
+ DoubleRegister::ListOf<f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20,
+ f22, f24>();
+
+#elif V8_TARGET_ARCH_MIPS64
+
+constexpr RegList kLiftoffAssemblerGpCacheRegs =
+ Register::ListOf<a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, s7, v0, v1>();
+
+constexpr RegList kLiftoffAssemblerFpCacheRegs =
+ DoubleRegister::ListOf<f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20,
+ f22, f24, f26>();
+
+#else
constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
@@ -49,12 +65,45 @@ constexpr RegList kLiftoffAssemblerFpCacheRegs = 0xff;
#endif
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
+
constexpr Condition kEqual = equal;
+constexpr Condition kUnequal = not_equal;
+constexpr Condition kSignedLessThan = less;
+constexpr Condition kSignedLessEqual = less_equal;
+constexpr Condition kSignedGreaterThan = greater;
+constexpr Condition kSignedGreaterEqual = greater_equal;
+constexpr Condition kUnsignedLessThan = below;
+constexpr Condition kUnsignedLessEqual = below_equal;
+constexpr Condition kUnsignedGreaterThan = above;
constexpr Condition kUnsignedGreaterEqual = above_equal;
+
+#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+
+constexpr Condition kEqual = eq;
+constexpr Condition kUnequal = ne;
+constexpr Condition kSignedLessThan = lt;
+constexpr Condition kSignedLessEqual = le;
+constexpr Condition kSignedGreaterThan = gt;
+constexpr Condition kSignedGreaterEqual = ge;
+constexpr Condition kUnsignedLessThan = ult;
+constexpr Condition kUnsignedLessEqual = ule;
+constexpr Condition kUnsignedGreaterThan = ugt;
+constexpr Condition kUnsignedGreaterEqual = uge;
+
#else
+
// On unimplemented platforms, just make this compile.
constexpr Condition kEqual = static_cast<Condition>(0);
+constexpr Condition kUnequal = static_cast<Condition>(0);
+constexpr Condition kSignedLessThan = static_cast<Condition>(0);
+constexpr Condition kSignedLessEqual = static_cast<Condition>(0);
+constexpr Condition kSignedGreaterThan = static_cast<Condition>(0);
+constexpr Condition kSignedGreaterEqual = static_cast<Condition>(0);
+constexpr Condition kUnsignedLessThan = static_cast<Condition>(0);
+constexpr Condition kUnsignedLessEqual = static_cast<Condition>(0);
+constexpr Condition kUnsignedGreaterThan = static_cast<Condition>(0);
constexpr Condition kUnsignedGreaterEqual = static_cast<Condition>(0);
+
#endif
} // namespace wasm
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index 121cfeea6a..09b8229dc1 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -31,20 +31,45 @@ class StackTransferRecipe {
struct RegisterMove {
LiftoffRegister dst;
LiftoffRegister src;
- constexpr RegisterMove(LiftoffRegister dst, LiftoffRegister src)
- : dst(dst), src(src) {}
+ ValueType type;
+ constexpr RegisterMove(LiftoffRegister dst, LiftoffRegister src,
+ ValueType type)
+ : dst(dst), src(src), type(type) {}
};
struct RegisterLoad {
- LiftoffRegister dst;
- bool is_constant_load; // otherwise load it from the stack.
- union {
- uint32_t stack_slot;
- WasmValue constant;
+ enum LoadKind : uint8_t {
+ kConstant, // load a constant value into a register.
+ kStack, // fill a register from a stack slot.
+ kHalfStack // fill one half of a register pair from half a stack slot.
};
- RegisterLoad(LiftoffRegister dst, WasmValue constant)
- : dst(dst), is_constant_load(true), constant(constant) {}
- RegisterLoad(LiftoffRegister dst, uint32_t stack_slot)
- : dst(dst), is_constant_load(false), stack_slot(stack_slot) {}
+
+ LiftoffRegister dst;
+ LoadKind kind;
+ ValueType type;
+ int32_t value; // i32 constant value or stack index, depending on kind.
+
+ // Named constructors.
+ static RegisterLoad Const(LiftoffRegister dst, WasmValue constant) {
+ if (constant.type() == kWasmI32) {
+ return {dst, kConstant, kWasmI32, constant.to_i32()};
+ }
+ DCHECK_EQ(kWasmI64, constant.type());
+ DCHECK_EQ(constant.to_i32_unchecked(), constant.to_i64_unchecked());
+ return {dst, kConstant, kWasmI64, constant.to_i32_unchecked()};
+ }
+ static RegisterLoad Stack(LiftoffRegister dst, int32_t stack_index,
+ ValueType type) {
+ return {dst, kStack, type, stack_index};
+ }
+ static RegisterLoad HalfStack(LiftoffRegister dst,
+ int32_t half_stack_index) {
+ return {dst, kHalfStack, kWasmI32, half_stack_index};
+ }
+
+ private:
+ RegisterLoad(LiftoffRegister dst, LoadKind kind, ValueType type,
+ int32_t value)
+ : dst(dst), kind(kind), type(type), value(value) {}
};
public:
@@ -55,15 +80,17 @@ class StackTransferRecipe {
// First, execute register moves. Then load constants and stack values into
// registers.
- if ((move_dst_regs & move_src_regs).is_empty()) {
+ if ((move_dst_regs_ & move_src_regs_).is_empty()) {
// No overlap in src and dst registers. Just execute the moves in any
// order.
- for (RegisterMove& rm : register_moves) asm_->Move(rm.dst, rm.src);
- register_moves.clear();
+ for (RegisterMove& rm : register_moves_) {
+ asm_->Move(rm.dst, rm.src, rm.type);
+ }
+ register_moves_.clear();
} else {
// Keep use counters of src registers.
uint32_t src_reg_use_count[kAfterMaxLiftoffRegCode] = {0};
- for (RegisterMove& rm : register_moves) {
+ for (RegisterMove& rm : register_moves_) {
++src_reg_use_count[rm.src.liftoff_code()];
}
// Now repeatedly iterate the list of register moves, and execute those
@@ -73,11 +100,11 @@ class StackTransferRecipe {
// register to the stack, add a RegisterLoad to reload it later, and
// continue.
uint32_t next_spill_slot = asm_->cache_state()->stack_height();
- while (!register_moves.empty()) {
+ while (!register_moves_.empty()) {
int executed_moves = 0;
- for (auto& rm : register_moves) {
+ for (auto& rm : register_moves_) {
if (src_reg_use_count[rm.dst.liftoff_code()] == 0) {
- asm_->Move(rm.dst, rm.src);
+ asm_->Move(rm.dst, rm.src, rm.type);
++executed_moves;
DCHECK_LT(0, src_reg_use_count[rm.src.liftoff_code()]);
--src_reg_use_count[rm.src.liftoff_code()];
@@ -89,53 +116,64 @@ class StackTransferRecipe {
if (executed_moves == 0) {
// There is a cycle. Spill one register, then continue.
// TODO(clemensh): Use an unused register if available.
- LiftoffRegister spill_reg = register_moves.back().src;
- asm_->Spill(next_spill_slot, spill_reg);
+ RegisterMove& rm = register_moves_.back();
+ LiftoffRegister spill_reg = rm.src;
+ asm_->Spill(next_spill_slot, spill_reg, rm.type);
// Remember to reload into the destination register later.
- LoadStackSlot(register_moves.back().dst, next_spill_slot);
+ LoadStackSlot(register_moves_.back().dst, next_spill_slot, rm.type);
DCHECK_EQ(1, src_reg_use_count[spill_reg.liftoff_code()]);
src_reg_use_count[spill_reg.liftoff_code()] = 0;
++next_spill_slot;
executed_moves = 1;
}
- register_moves.erase(register_moves.end() - executed_moves,
- register_moves.end());
+ register_moves_.erase(register_moves_.end() - executed_moves,
+ register_moves_.end());
}
}
- for (RegisterLoad& rl : register_loads) {
- if (rl.is_constant_load) {
- asm_->LoadConstant(rl.dst, rl.constant);
- } else {
- asm_->Fill(rl.dst, rl.stack_slot);
+ for (RegisterLoad& rl : register_loads_) {
+ switch (rl.kind) {
+ case RegisterLoad::kConstant:
+ asm_->LoadConstant(rl.dst, rl.type == kWasmI64
+ ? WasmValue(int64_t{rl.value})
+ : WasmValue(int32_t{rl.value}));
+ break;
+ case RegisterLoad::kStack:
+ asm_->Fill(rl.dst, rl.value, rl.type);
+ break;
+ case RegisterLoad::kHalfStack:
+ // As half of a register pair, {rl.dst} must be a gp register.
+ asm_->FillI64Half(rl.dst.gp(), rl.value);
+ break;
}
}
- register_loads.clear();
+ register_loads_.clear();
}
void TransferStackSlot(const LiftoffAssembler::CacheState& dst_state,
uint32_t dst_index, uint32_t src_index) {
const VarState& dst = dst_state.stack_state[dst_index];
const VarState& src = __ cache_state()->stack_state[src_index];
+ DCHECK_EQ(dst.type(), src.type());
switch (dst.loc()) {
case VarState::kStack:
switch (src.loc()) {
case VarState::kStack:
if (src_index == dst_index) break;
- asm_->MoveStackValue(dst_index, src_index);
+ asm_->MoveStackValue(dst_index, src_index, src.type());
break;
case VarState::kRegister:
- asm_->Spill(dst_index, src.reg());
+ asm_->Spill(dst_index, src.reg(), src.type());
break;
- case VarState::kI32Const:
- asm_->Spill(dst_index, WasmValue(src.i32_const()));
+ case VarState::KIntConst:
+ asm_->Spill(dst_index, src.constant());
break;
}
break;
case VarState::kRegister:
LoadIntoRegister(dst.reg(), src, src_index);
break;
- case VarState::kI32Const:
+ case VarState::KIntConst:
DCHECK_EQ(dst, src);
break;
}
@@ -146,40 +184,80 @@ class StackTransferRecipe {
uint32_t src_index) {
switch (src.loc()) {
case VarState::kStack:
- LoadStackSlot(dst, src_index);
+ LoadStackSlot(dst, src_index, src.type());
break;
case VarState::kRegister:
DCHECK_EQ(dst.reg_class(), src.reg_class());
- if (dst != src.reg()) MoveRegister(dst, src.reg());
+ if (dst != src.reg()) MoveRegister(dst, src.reg(), src.type());
+ break;
+ case VarState::KIntConst:
+ LoadConstant(dst, src.constant());
break;
- case VarState::kI32Const:
- LoadConstant(dst, WasmValue(src.i32_const()));
+ }
+ }
+
+ void LoadI64HalfIntoRegister(LiftoffRegister dst,
+ const LiftoffAssembler::VarState& src,
+ uint32_t index, RegPairHalf half) {
+ // Use CHECK such that the remaining code is statically dead if
+ // {kNeedI64RegPair} is false.
+ CHECK(kNeedI64RegPair);
+ DCHECK_EQ(kWasmI64, src.type());
+ switch (src.loc()) {
+ case VarState::kStack:
+ LoadI64HalfStackSlot(dst, 2 * index + (half == kLowWord ? 0 : 1));
+ break;
+ case VarState::kRegister: {
+ LiftoffRegister src_half =
+ half == kLowWord ? src.reg().low() : src.reg().high();
+ if (dst != src_half) MoveRegister(dst, src_half, kWasmI32);
+ break;
+ }
+ case VarState::KIntConst:
+ int32_t value = src.i32_const();
+ // The high word is the sign extension of the low word.
+ if (half == kHighWord) value = value >> 31;
+ LoadConstant(dst, WasmValue(value));
break;
}
}
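
The {value >> 31} above works because the high word of a sign-extended 32-bit constant is either all zeros or all ones; a tiny illustration (assuming arithmetic right shift of signed values, as mainstream compilers implement):

#include <cstdint>
#include <cstdio>

// Sketch only: the high word of an i64 constant stored as a sign-extended
// int32_t is the low word shifted arithmetically by 31 bits.
int32_t HighWordOfSignExtended(int32_t low) { return low >> 31; }

int main() {
  std::printf("%d\n", HighWordOfSignExtended(7));   // 0
  std::printf("%d\n", HighWordOfSignExtended(-5));  // -1 (all bits set)
}
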
- void MoveRegister(LiftoffRegister dst, LiftoffRegister src) {
+ void MoveRegister(LiftoffRegister dst, LiftoffRegister src, ValueType type) {
DCHECK_NE(dst, src);
- DCHECK(!move_dst_regs.has(dst));
- move_dst_regs.set(dst);
- move_src_regs.set(src);
- register_moves.emplace_back(dst, src);
+ DCHECK_EQ(dst.reg_class(), src.reg_class());
+ DCHECK_EQ(reg_class_for(type), src.reg_class());
+ if (src.is_pair()) {
+ DCHECK_EQ(kWasmI64, type);
+ if (dst.low() != src.low()) MoveRegister(dst.low(), src.low(), kWasmI32);
+ if (dst.high() != src.high())
+ MoveRegister(dst.high(), src.high(), kWasmI32);
+ return;
+ }
+ DCHECK(!move_dst_regs_.has(dst));
+ move_dst_regs_.set(dst);
+ move_src_regs_.set(src);
+ register_moves_.emplace_back(dst, src, type);
}
void LoadConstant(LiftoffRegister dst, WasmValue value) {
- register_loads.emplace_back(dst, value);
+ register_loads_.push_back(RegisterLoad::Const(dst, value));
+ }
+
+ void LoadStackSlot(LiftoffRegister dst, uint32_t stack_index,
+ ValueType type) {
+ register_loads_.push_back(RegisterLoad::Stack(dst, stack_index, type));
}
- void LoadStackSlot(LiftoffRegister dst, uint32_t stack_index) {
- register_loads.emplace_back(dst, stack_index);
+ void LoadI64HalfStackSlot(LiftoffRegister dst, uint32_t half_stack_index) {
+ register_loads_.push_back(RegisterLoad::HalfStack(dst, half_stack_index));
}
private:
// TODO(clemensh): Avoid unconditionally allocating on the heap.
- std::vector<RegisterMove> register_moves;
- std::vector<RegisterLoad> register_loads;
- LiftoffRegList move_dst_regs;
- LiftoffRegList move_src_regs;
+ std::vector<RegisterMove> register_moves_;
+ std::vector<RegisterLoad> register_loads_;
+ LiftoffRegList move_dst_regs_;
+ LiftoffRegList move_src_regs_;
LiftoffAssembler* const asm_;
};
@@ -301,16 +379,16 @@ LiftoffRegister LiftoffAssembler::PopToRegister(RegClass rc,
switch (slot.loc()) {
case VarState::kStack: {
LiftoffRegister reg = GetUnusedRegister(rc, pinned);
- Fill(reg, cache_state_.stack_height());
+ Fill(reg, cache_state_.stack_height(), slot.type());
return reg;
}
case VarState::kRegister:
DCHECK_EQ(rc, slot.reg_class());
cache_state_.dec_used(slot.reg());
return slot.reg();
- case VarState::kI32Const: {
+ case VarState::KIntConst: {
LiftoffRegister reg = GetUnusedRegister(rc, pinned);
- LoadConstant(reg, WasmValue(slot.i32_const()));
+ LoadConstant(reg, slot.constant());
return reg;
}
}
@@ -335,6 +413,8 @@ void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity) {
// ^target_stack_base
uint32_t stack_height = cache_state_.stack_height();
uint32_t target_stack_height = target.stack_height();
+ DCHECK_LE(target_stack_height, stack_height);
+ DCHECK_LE(arity, target_stack_height);
uint32_t stack_base = stack_height - arity;
uint32_t target_stack_base = target_stack_height - arity;
StackTransferRecipe transfers(this);
@@ -352,11 +432,11 @@ void LiftoffAssembler::Spill(uint32_t index) {
case VarState::kStack:
return;
case VarState::kRegister:
- Spill(index, slot.reg());
+ Spill(index, slot.reg(), slot.type());
cache_state_.dec_used(slot.reg());
break;
- case VarState::kI32Const:
- Spill(index, WasmValue(slot.i32_const()));
+ case VarState::KIntConst:
+ Spill(index, slot.constant());
break;
}
slot.MakeStack();
@@ -372,19 +452,17 @@ void LiftoffAssembler::SpillAllRegisters() {
for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) {
auto& slot = cache_state_.stack_state[i];
if (!slot.is_reg()) continue;
- Spill(i, slot.reg());
+ Spill(i, slot.reg(), slot.type());
slot.MakeStack();
}
cache_state_.reset_used_registers();
}
void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
- compiler::CallDescriptor* call_desc) {
+ compiler::CallDescriptor* call_descriptor,
+ Register* target,
+ LiftoffRegister* explicit_context) {
uint32_t num_params = static_cast<uint32_t>(sig->parameter_count());
- // Parameter 0 is the wasm context.
- constexpr size_t kFirstActualParameter = 1;
- DCHECK_EQ(kFirstActualParameter + num_params, call_desc->ParameterCount());
-
// Input 0 is the call target.
constexpr size_t kInputShift = 1;
@@ -394,66 +472,134 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
idx < end; ++idx) {
VarState& slot = cache_state_.stack_state[idx];
if (!slot.is_reg()) continue;
- Spill(idx, slot.reg());
+ Spill(idx, slot.reg(), slot.type());
slot.MakeStack();
}
StackTransferRecipe stack_transfers(this);
+ LiftoffRegList param_regs;
+
+ // Move the explicit context (if any) into the correct context register.
+ compiler::LinkageLocation context_loc =
+ call_descriptor->GetInputLocation(kInputShift);
+ DCHECK(context_loc.IsRegister() && !context_loc.IsAnyRegister());
+ LiftoffRegister context_reg(Register::from_code(context_loc.AsRegister()));
+ param_regs.set(context_reg);
+ if (explicit_context && *explicit_context != context_reg) {
+ stack_transfers.MoveRegister(context_reg, *explicit_context, kWasmIntPtr);
+ }
// Now move all parameter values into the right slot for the call.
- // Process parameters backward, such that we can just pop values from the
- // stack.
+ // Don't pop values yet, such that the stack height is still correct when
+ // executing the {stack_transfers}.
+ // Process parameters backwards, such that pushes of caller frame slots are
+ // in the correct order.
+ uint32_t param_base = cache_state_.stack_height() - num_params;
+ uint32_t call_desc_input_idx =
+ static_cast<uint32_t>(call_descriptor->InputCount());
for (uint32_t i = num_params; i > 0; --i) {
- uint32_t param = i - 1;
+ const uint32_t param = i - 1;
ValueType type = sig->GetParam(param);
- RegClass rc = reg_class_for(type);
- compiler::LinkageLocation loc = call_desc->GetInputLocation(
- param + kFirstActualParameter + kInputShift);
- const VarState& slot = cache_state_.stack_state.back();
- uint32_t stack_idx = cache_state_.stack_height() - 1;
- if (loc.IsRegister()) {
- DCHECK(!loc.IsAnyRegister());
- int reg_code = loc.AsRegister();
- LiftoffRegister reg = LiftoffRegister::from_code(rc, reg_code);
- stack_transfers.LoadIntoRegister(reg, slot, stack_idx);
+ const bool is_pair = kNeedI64RegPair && type == kWasmI64;
+ const int num_lowered_params = is_pair ? 2 : 1;
+ const uint32_t stack_idx = param_base + param;
+ const VarState& slot = cache_state_.stack_state[stack_idx];
+ // Process both halves of a register pair separately, because they are passed
+ // as separate parameters. One or both of them could end up on the stack.
+ for (int lowered_idx = 0; lowered_idx < num_lowered_params; ++lowered_idx) {
+ const RegPairHalf half =
+ is_pair && lowered_idx == 0 ? kHighWord : kLowWord;
+ --call_desc_input_idx;
+ compiler::LinkageLocation loc =
+ call_descriptor->GetInputLocation(call_desc_input_idx);
+ if (loc.IsRegister()) {
+ DCHECK(!loc.IsAnyRegister());
+ RegClass rc = is_pair ? kGpReg : reg_class_for(type);
+ LiftoffRegister reg = LiftoffRegister::from_code(rc, loc.AsRegister());
+ param_regs.set(reg);
+ if (is_pair) {
+ stack_transfers.LoadI64HalfIntoRegister(reg, slot, stack_idx, half);
+ } else {
+ stack_transfers.LoadIntoRegister(reg, slot, stack_idx);
+ }
+ } else {
+ DCHECK(loc.IsCallerFrameSlot());
+ PushCallerFrameSlot(slot, stack_idx, half);
+ }
+ }
+ }
+ // {call_desc_input_idx} should point after the context parameter now.
+ DCHECK_EQ(call_desc_input_idx, kInputShift + 1);
+
+ // If the target register overlaps with a parameter register, then move the
+ // target to another free register, or spill to the stack.
+ if (target && param_regs.has(LiftoffRegister(*target))) {
+ // Try to find another free register.
+ LiftoffRegList free_regs = kGpCacheRegList.MaskOut(param_regs);
+ if (!free_regs.is_empty()) {
+ LiftoffRegister new_target = free_regs.GetFirstRegSet();
+ stack_transfers.MoveRegister(new_target, LiftoffRegister(*target),
+ kWasmIntPtr);
+ *target = new_target.gp();
} else {
- DCHECK(loc.IsCallerFrameSlot());
- PushCallerFrameSlot(slot, stack_idx);
+ PushCallerFrameSlot(LiftoffRegister(*target), kWasmIntPtr);
+ *target = no_reg;
}
- cache_state_.stack_state.pop_back();
}
// Execute the stack transfers before filling the context register.
stack_transfers.Execute();
+ // Pop parameters from the value stack.
+ auto stack_end = cache_state_.stack_state.end();
+ cache_state_.stack_state.erase(stack_end - num_params, stack_end);
+
// Reset register use counters.
cache_state_.reset_used_registers();
- // Fill the wasm context into the right register.
- compiler::LinkageLocation context_loc =
- call_desc->GetInputLocation(kInputShift);
- DCHECK(context_loc.IsRegister() && !context_loc.IsAnyRegister());
- int context_reg_code = context_loc.AsRegister();
- LiftoffRegister context_reg(Register::from_code(context_reg_code));
- FillContextInto(context_reg.gp());
+ // Reload the context from the stack.
+ if (!explicit_context) {
+ FillContextInto(context_reg.gp());
+ }
}
void LiftoffAssembler::FinishCall(wasm::FunctionSig* sig,
- compiler::CallDescriptor* call_desc) {
- size_t return_count = call_desc->ReturnCount();
- DCHECK_EQ(return_count, sig->return_count());
+ compiler::CallDescriptor* call_descriptor) {
+ const size_t return_count = sig->return_count();
if (return_count != 0) {
DCHECK_EQ(1, return_count);
- compiler::LinkageLocation return_loc = call_desc->GetReturnLocation(0);
- int return_reg_code = return_loc.AsRegister();
ValueType return_type = sig->GetReturn(0);
- LiftoffRegister return_reg =
- LiftoffRegister::from_code(reg_class_for(return_type), return_reg_code);
+ const bool need_pair = kNeedI64RegPair && return_type == kWasmI64;
+ DCHECK_EQ(need_pair ? 2 : 1, call_descriptor->ReturnCount());
+ RegClass rc = need_pair ? kGpReg : reg_class_for(return_type);
+ LiftoffRegister return_reg = LiftoffRegister::from_code(
+ rc, call_descriptor->GetReturnLocation(0).AsRegister());
+ DCHECK(GetCacheRegList(rc).has(return_reg));
+ if (need_pair) {
+ LiftoffRegister high_reg = LiftoffRegister::from_code(
+ rc, call_descriptor->GetReturnLocation(1).AsRegister());
+ DCHECK(GetCacheRegList(rc).has(high_reg));
+ return_reg = LiftoffRegister::ForPair(return_reg, high_reg);
+ }
DCHECK(!cache_state_.is_used(return_reg));
PushRegister(return_type, return_reg);
}
}
+void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src,
+ ValueType type) {
+ DCHECK_EQ(dst.reg_class(), src.reg_class());
+ if (kNeedI64RegPair && dst.is_pair()) {
+ // Use the {StackTransferRecipe} to move pairs, as the registers in the
+ // pairs might overlap.
+ StackTransferRecipe(this).MoveRegister(dst, src, type);
+ } else if (dst.is_gp()) {
+ Move(dst.gp(), src.gp(), type);
+ } else {
+ Move(dst.fp(), src.fp(), type);
+ }
+}
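
Using the {StackTransferRecipe} for pairs matters because the two halves of {dst} and {src} may alias; a small illustration of the hazard, with plain ints standing in for registers:

#include <cstdio>

// Sketch only: moving an i64 pair dst=(r1,r2) from src=(r0,r1) shows why pair
// moves go through the transfer recipe: the naive low-then-high order
// overwrites r1 before the high half is read.
int main() {
  // Naive order (wrong): low half first.
  int r0 = 11, r1 = 22, r2 = 0;
  r1 = r0;  // dst.low  <- src.low
  r2 = r1;  // dst.high <- src.high, but r1 already holds 11
  std::printf("naive:   r1=%d r2=%d\n", r1, r2);  // r1=11 r2=11

  // Ordered (correct): move the overlapping half first.
  r0 = 11; r1 = 22; r2 = 0;
  r2 = r1;  // dst.high <- src.high
  r1 = r0;  // dst.low  <- src.low
  std::printf("ordered: r1=%d r2=%d\n", r1, r2);  // r1=11 r2=22
  return 0;
}
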
+
LiftoffRegister LiftoffAssembler::SpillOneRegister(LiftoffRegList candidates,
LiftoffRegList pinned) {
// Spill one cached value to free a register.
@@ -468,8 +614,14 @@ void LiftoffAssembler::SpillRegister(LiftoffRegister reg) {
for (uint32_t idx = cache_state_.stack_height() - 1;; --idx) {
DCHECK_GT(cache_state_.stack_height(), idx);
auto* slot = &cache_state_.stack_state[idx];
- if (!slot->is_reg() || slot->reg() != reg) continue;
- Spill(idx, reg);
+ if (!slot->is_reg() || !slot->reg().overlaps(reg)) continue;
+ if (slot->reg().is_pair()) {
+ // Make sure to decrement *both* registers in a pair, because the
+ // {clear_used} call below only clears one of them.
+ cache_state_.dec_used(slot->reg().low());
+ cache_state_.dec_used(slot->reg().high());
+ }
+ Spill(idx, slot->reg(), slot->type());
slot->MakeStack();
if (--remaining_uses == 0) break;
}
@@ -486,10 +638,6 @@ void LiftoffAssembler::set_num_locals(uint32_t num_locals) {
}
}
-uint32_t LiftoffAssembler::GetTotalFrameSlotCount() const {
- return num_locals() + kMaxValueStackHeight;
-}
-
std::ostream& operator<<(std::ostream& os, VarState slot) {
os << WasmOpcodes::TypeName(slot.type()) << ":";
switch (slot.loc()) {
@@ -497,7 +645,7 @@ std::ostream& operator<<(std::ostream& os, VarState slot) {
return os << "s";
case VarState::kRegister:
return os << slot.reg();
- case VarState::kI32Const:
+ case VarState::KIntConst:
return os << "c" << slot.i32_const();
}
UNREACHABLE();
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index b91f6d7c88..99d9814dea 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -8,8 +8,6 @@
#include <iosfwd>
#include <memory>
-// Clients of this interface shouldn't depend on lots of compiler internals.
-// Do not include anything from src/compiler here!
#include "src/base/bits.h"
#include "src/frames.h"
#include "src/macro-assembler.h"
@@ -29,35 +27,35 @@ struct ModuleEnv;
class LiftoffAssembler : public TurboAssembler {
public:
- // TODO(clemensh): Remove this limitation by allocating more stack space if
- // needed.
- static constexpr int kMaxValueStackHeight = 8;
-
// Each slot in our stack frame currently has exactly 8 bytes.
static constexpr uint32_t kStackSlotSize = 8;
+ static constexpr ValueType kWasmIntPtr =
+ kPointerSize == 8 ? kWasmI64 : kWasmI32;
+
class VarState {
public:
- enum Location : uint8_t { kStack, kRegister, kI32Const };
+ enum Location : uint8_t { kStack, kRegister, KIntConst };
explicit VarState(ValueType type) : loc_(kStack), type_(type) {}
explicit VarState(ValueType type, LiftoffRegister r)
: loc_(kRegister), type_(type), reg_(r) {
DCHECK_EQ(r.reg_class(), reg_class_for(type));
}
- explicit VarState(ValueType type, uint32_t i32_const)
- : loc_(kI32Const), type_(type), i32_const_(i32_const) {
+ explicit VarState(ValueType type, int32_t i32_const)
+ : loc_(KIntConst), type_(type), i32_const_(i32_const) {
DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
}
bool operator==(const VarState& other) const {
if (loc_ != other.loc_) return false;
+ if (type_ != other.type_) return false;
switch (loc_) {
case kStack:
return true;
case kRegister:
return reg_ == other.reg_;
- case kI32Const:
+ case KIntConst:
return i32_const_ == other.i32_const_;
}
UNREACHABLE();
@@ -67,16 +65,23 @@ class LiftoffAssembler : public TurboAssembler {
bool is_gp_reg() const { return loc_ == kRegister && reg_.is_gp(); }
bool is_fp_reg() const { return loc_ == kRegister && reg_.is_fp(); }
bool is_reg() const { return loc_ == kRegister; }
- bool is_const() const { return loc_ == kI32Const; }
+ bool is_const() const { return loc_ == KIntConst; }
ValueType type() const { return type_; }
Location loc() const { return loc_; }
- uint32_t i32_const() const {
- DCHECK_EQ(loc_, kI32Const);
+ int32_t i32_const() const {
+ DCHECK_EQ(loc_, KIntConst);
return i32_const_;
}
+ WasmValue constant() const {
+ DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
+ DCHECK_EQ(loc_, KIntConst);
+ return type_ == kWasmI32 ? WasmValue(i32_const_)
+ : WasmValue(int64_t{i32_const_});
+ }
+
Register gp_reg() const { return reg().gp(); }
DoubleRegister fp_reg() const { return reg().fp(); }
LiftoffRegister reg() const {
@@ -95,7 +100,7 @@ class LiftoffAssembler : public TurboAssembler {
union {
LiftoffRegister reg_; // used if loc_ == kRegister
- uint32_t i32_const_; // used if loc_ == kI32Const
+ int32_t i32_const_; // used if loc_ == KIntConst
};
};
@@ -117,6 +122,11 @@ class LiftoffAssembler : public TurboAssembler {
uint32_t stack_base = 0;
bool has_unused_register(RegClass rc, LiftoffRegList pinned = {}) const {
+ if (kNeedI64RegPair && rc == kGpRegPair) {
+ LiftoffRegList available_regs =
+ kGpCacheRegList & ~used_registers & ~pinned;
+ return available_regs.GetNumRegsSet() >= 2;
+ }
DCHECK(rc == kGpReg || rc == kFpReg);
LiftoffRegList candidates = GetCacheRegList(rc);
return has_unused_register(candidates, pinned);
@@ -130,9 +140,14 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister unused_register(RegClass rc,
LiftoffRegList pinned = {}) const {
+ if (kNeedI64RegPair && rc == kGpRegPair) {
+ LiftoffRegister low = pinned.set(unused_register(kGpReg, pinned));
+ LiftoffRegister high = unused_register(kGpReg, pinned);
+ return LiftoffRegister::ForPair(low, high);
+ }
DCHECK(rc == kGpReg || rc == kFpReg);
LiftoffRegList candidates = GetCacheRegList(rc);
- return unused_register(candidates);
+ return unused_register(candidates, pinned);
}
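
Selecting the low half first and pinning it before selecting the high half is what keeps the two halves of a register pair distinct. A standalone sketch of the pin-then-pick idiom ({SimpleRegList} and {PickUnused} are made-up stand-ins, not V8 types):

#include <cstdint>

// Sketch only: pin-then-pick ensures two picks from the same pool differ.
struct SimpleRegList {
  uint32_t bits = 0;
  void set(int reg) { bits |= 1u << reg; }
  bool has(int reg) const { return (bits >> reg) & 1u; }
};

int PickUnused(const SimpleRegList& used, const SimpleRegList& pinned) {
  for (int reg = 0; reg < 32; ++reg) {
    if (!used.has(reg) && !pinned.has(reg)) return reg;
  }
  return -1;  // no free register; the real code would spill one
}

// Usage:
//   SimpleRegList used, pinned;
//   int low = PickUnused(used, pinned);
//   pinned.set(low);                      // pin the first pick
//   int high = PickUnused(used, pinned);  // guaranteed to differ from low
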
LiftoffRegister unused_register(LiftoffRegList candidates,
@@ -142,22 +157,31 @@ class LiftoffAssembler : public TurboAssembler {
}
void inc_used(LiftoffRegister reg) {
+ if (reg.is_pair()) {
+ inc_used(reg.low());
+ inc_used(reg.high());
+ return;
+ }
used_registers.set(reg);
DCHECK_GT(kMaxInt, register_use_count[reg.liftoff_code()]);
++register_use_count[reg.liftoff_code()];
}
// Returns whether this was the last use.
- bool dec_used(LiftoffRegister reg) {
+ void dec_used(LiftoffRegister reg) {
DCHECK(is_used(reg));
+ if (reg.is_pair()) {
+ dec_used(reg.low());
+ dec_used(reg.high());
+ return;
+ }
int code = reg.liftoff_code();
DCHECK_LT(0, register_use_count[code]);
- if (--register_use_count[code] != 0) return false;
- used_registers.clear(reg);
- return true;
+ if (--register_use_count[code] == 0) used_registers.clear(reg);
}
bool is_used(LiftoffRegister reg) const {
+ if (reg.is_pair()) return is_used(reg.low()) || is_used(reg.high());
bool used = used_registers.has(reg);
DCHECK_EQ(used, register_use_count[reg.liftoff_code()] != 0);
return used;
@@ -239,6 +263,12 @@ class LiftoffAssembler : public TurboAssembler {
// Get an unused register for class {rc}, potentially spilling to free one.
LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned = {}) {
+ if (kNeedI64RegPair && rc == kGpRegPair) {
+ LiftoffRegList candidates = kGpCacheRegList;
+ LiftoffRegister low = pinned.set(GetUnusedRegister(candidates, pinned));
+ LiftoffRegister high = GetUnusedRegister(candidates, pinned);
+ return LiftoffRegister::ForPair(low, high);
+ }
DCHECK(rc == kGpReg || rc == kFpReg);
LiftoffRegList candidates = GetCacheRegList(rc);
return GetUnusedRegister(candidates, pinned);
@@ -270,18 +300,36 @@ class LiftoffAssembler : public TurboAssembler {
void SpillLocals();
void SpillAllRegisters();
+ // Call this method whenever spilling something, such that the number of used
+ // spill slots can be tracked and the stack frame will be allocated large enough.
+ void RecordUsedSpillSlot(uint32_t index) {
+ if (index >= num_used_spill_slots_) num_used_spill_slots_ = index + 1;
+ }
+
// Load parameters into the right registers / stack slots for the call.
- void PrepareCall(wasm::FunctionSig*, compiler::CallDescriptor*);
+ // Move {*target} into another register if needed and update {*target} to that
+ // register, or {no_reg} if target was spilled to the stack.
+ void PrepareCall(wasm::FunctionSig*, compiler::CallDescriptor*,
+ Register* target = nullptr,
+ LiftoffRegister* explicit_context = nullptr);
// Process return values of the call.
void FinishCall(wasm::FunctionSig*, compiler::CallDescriptor*);
+ void Move(LiftoffRegister dst, LiftoffRegister src, ValueType);
+
////////////////////////////////////
// Platform-specific part. //
////////////////////////////////////
- inline void ReserveStackSpace(uint32_t bytes);
+ // This function emits machine code to prepare the stack frame, before the
+ // size of the stack frame is known. It returns an offset in the machine code
+ // which can later be patched (via {PatchPrepareStackFrame}) when the size of
+ // the frame is known.
+ inline uint32_t PrepareStackFrame();
+ inline void PatchPrepareStackFrame(uint32_t offset, uint32_t stack_slots);
- inline void LoadConstant(LiftoffRegister, WasmValue);
+ inline void LoadConstant(LiftoffRegister, WasmValue,
+ RelocInfo::Mode rmode = RelocInfo::NONE);
inline void LoadFromContext(Register dst, uint32_t offset, int size);
inline void SpillContext(Register context);
inline void FillContextInto(Register dst);
@@ -291,16 +339,18 @@ class LiftoffAssembler : public TurboAssembler {
inline void Store(Register dst_addr, Register offset_reg, uint32_t offset_imm,
LiftoffRegister src, StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc = nullptr);
- inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx);
- inline void MoveStackValue(uint32_t dst_index, uint32_t src_index);
+ inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx,
+ ValueType);
+ inline void MoveStackValue(uint32_t dst_index, uint32_t src_index, ValueType);
- inline void MoveToReturnRegister(LiftoffRegister);
- // TODO(clemensh): Pass the type to {Move}, to emit more efficient code.
- inline void Move(LiftoffRegister dst, LiftoffRegister src);
+ inline void MoveToReturnRegister(LiftoffRegister src, ValueType);
+ inline void Move(Register dst, Register src, ValueType);
+ inline void Move(DoubleRegister dst, DoubleRegister src, ValueType);
- inline void Spill(uint32_t index, LiftoffRegister);
+ inline void Spill(uint32_t index, LiftoffRegister, ValueType);
inline void Spill(uint32_t index, WasmValue);
- inline void Fill(LiftoffRegister, uint32_t index);
+ inline void Fill(LiftoffRegister, uint32_t index, ValueType);
+ inline void FillI64Half(Register, uint32_t half_index);
// i32 binops.
inline void emit_i32_add(Register dst, Register lhs, Register rhs);
@@ -309,29 +359,49 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_i32_and(Register dst, Register lhs, Register rhs);
inline void emit_i32_or(Register dst, Register lhs, Register rhs);
inline void emit_i32_xor(Register dst, Register lhs, Register rhs);
- inline void emit_i32_shl(Register dst, Register lhs, Register rhs);
- inline void emit_i32_sar(Register dst, Register lhs, Register rhs);
- inline void emit_i32_shr(Register dst, Register lhs, Register rhs);
+ inline void emit_i32_shl(Register dst, Register lhs, Register rhs,
+ LiftoffRegList pinned = {});
+ inline void emit_i32_sar(Register dst, Register lhs, Register rhs,
+ LiftoffRegList pinned = {});
+ inline void emit_i32_shr(Register dst, Register lhs, Register rhs,
+ LiftoffRegList pinned = {});
// i32 unops.
- inline bool emit_i32_eqz(Register dst, Register src);
inline bool emit_i32_clz(Register dst, Register src);
inline bool emit_i32_ctz(Register dst, Register src);
inline bool emit_i32_popcnt(Register dst, Register src);
inline void emit_ptrsize_add(Register dst, Register lhs, Register rhs);
+ // f32 binops.
inline void emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs);
inline void emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs);
inline void emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs);
+ // f32 unops.
+ inline void emit_f32_neg(DoubleRegister dst, DoubleRegister src);
+
+ // f64 binops.
+ inline void emit_f64_add(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs);
+ inline void emit_f64_sub(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs);
+ inline void emit_f64_mul(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs);
+
+ // f64 unops.
+ inline void emit_f64_neg(DoubleRegister dst, DoubleRegister src);
- inline void emit_i32_test(Register);
- inline void emit_i32_compare(Register, Register);
inline void emit_jump(Label*);
- inline void emit_cond_jump(Condition, Label*);
+ inline void emit_cond_jump(Condition, Label*, ValueType value, Register lhs,
+ Register rhs = no_reg);
+ // Set {dst} to 1 if condition holds, 0 otherwise.
+ inline void emit_i32_set_cond(Condition, Register dst, Register lhs,
+ Register rhs = no_reg);
+ inline void emit_f32_set_cond(Condition, Register dst, DoubleRegister lhs,
+ DoubleRegister rhs);
inline void StackCheck(Label* ool_code);
@@ -340,8 +410,9 @@ class LiftoffAssembler : public TurboAssembler {
inline void AssertUnreachable(AbortReason reason);
// Push a value to the stack (will become a caller frame slot).
- inline void PushCallerFrameSlot(const VarState& src, uint32_t src_index);
- inline void PushCallerFrameSlot(LiftoffRegister reg);
+ inline void PushCallerFrameSlot(const VarState& src, uint32_t src_index,
+ RegPairHalf half);
+ inline void PushCallerFrameSlot(LiftoffRegister reg, ValueType type);
inline void PushRegisters(LiftoffRegList);
inline void PopRegisters(LiftoffRegList);
@@ -358,8 +429,11 @@ class LiftoffAssembler : public TurboAssembler {
inline void CallC(ExternalReference ext_ref, uint32_t num_params);
inline void CallNativeWasmCode(Address addr);
-
inline void CallRuntime(Zone* zone, Runtime::FunctionId fid);
+ // Indirect call: If {target == no_reg}, then pop the target from the stack.
+ inline void CallIndirect(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target);
// Reserve space in the current frame, store address to space in {addr}.
inline void AllocateStackSlot(Register addr, uint32_t size);
@@ -372,7 +446,9 @@ class LiftoffAssembler : public TurboAssembler {
uint32_t num_locals() const { return num_locals_; }
void set_num_locals(uint32_t num_locals);
- uint32_t GetTotalFrameSlotCount() const;
+ uint32_t GetTotalFrameSlotCount() const {
+ return num_locals_ + num_used_spill_slots_;
+ }
ValueType local_type(uint32_t index) {
DCHECK_GT(num_locals_, index);
@@ -389,6 +465,9 @@ class LiftoffAssembler : public TurboAssembler {
CacheState* cache_state() { return &cache_state_; }
+ bool did_bailout() { return bailout_reason_ != nullptr; }
+ const char* bailout_reason() const { return bailout_reason_; }
+
private:
uint32_t num_locals_ = 0;
static constexpr uint32_t kInlineLocalTypes = 8;
@@ -399,9 +478,15 @@ class LiftoffAssembler : public TurboAssembler {
static_assert(sizeof(ValueType) == 1,
"Reconsider this inlining if ValueType gets bigger");
CacheState cache_state_;
+ uint32_t num_used_spill_slots_ = 0;
+ const char* bailout_reason_ = nullptr;
LiftoffRegister SpillOneRegister(LiftoffRegList candidates,
LiftoffRegList pinned);
+
+ void bailout(const char* reason) {
+ if (bailout_reason_ == nullptr) bailout_reason_ = reason;
+ }
};
std::ostream& operator<<(std::ostream& os, LiftoffAssembler::VarState);
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 255ee0347e..c6adb90f82 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -20,7 +20,7 @@ namespace internal {
namespace wasm {
constexpr auto kRegister = LiftoffAssembler::VarState::kRegister;
-constexpr auto kI32Const = LiftoffAssembler::VarState::kI32Const;
+constexpr auto KIntConst = LiftoffAssembler::VarState::KIntConst;
constexpr auto kStack = LiftoffAssembler::VarState::kStack;
namespace {
@@ -41,6 +41,8 @@ class MovableLabel {
Label* get() { return label_.get(); }
MovableLabel() : MovableLabel(new Label()) {}
+ operator bool() const { return label_ != nullptr; }
+
static MovableLabel None() { return MovableLabel(nullptr); }
private:
@@ -53,6 +55,8 @@ class MovableLabel {
public:
Label* get() { return &label_; }
+ operator bool() const { return true; }
+
static MovableLabel None() { return MovableLabel(); }
private:
@@ -60,6 +64,25 @@ class MovableLabel {
};
#endif
+wasm::WasmValue WasmPtrValue(uintptr_t ptr) {
+ using int_t = std::conditional<kPointerSize == 8, uint64_t, uint32_t>::type;
+ static_assert(sizeof(int_t) == sizeof(uintptr_t), "weird uintptr_t");
+ return wasm::WasmValue(static_cast<int_t>(ptr));
+}
+
+wasm::WasmValue WasmPtrValue(void* ptr) {
+ return WasmPtrValue(reinterpret_cast<uintptr_t>(ptr));
+}
+
+compiler::CallDescriptor* GetLoweredCallDescriptor(
+ Zone* zone, compiler::CallDescriptor* call_desc) {
+ return kPointerSize == 4 ? compiler::GetI32WasmCallDescriptor(zone, call_desc)
+ : call_desc;
+}
+
+constexpr ValueType kTypesArr_ilfd[] = {kWasmI32, kWasmI64, kWasmF32, kWasmF64};
+constexpr Vector<const ValueType> kTypes_ilfd = ArrayVector(kTypesArr_ilfd);
+
class LiftoffCompiler {
public:
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(LiftoffCompiler);
@@ -106,32 +129,30 @@ class LiftoffCompiler {
};
LiftoffCompiler(LiftoffAssembler* liftoff_asm,
- compiler::CallDescriptor* call_desc, compiler::ModuleEnv* env,
+ compiler::CallDescriptor* call_descriptor,
+ compiler::ModuleEnv* env,
compiler::RuntimeExceptionSupport runtime_exception_support,
SourcePositionTableBuilder* source_position_table_builder,
std::vector<trap_handler::ProtectedInstructionData>*
protected_instructions,
Zone* compilation_zone, std::unique_ptr<Zone>* codegen_zone)
: asm_(liftoff_asm),
- call_desc_(call_desc),
+ descriptor_(
+ GetLoweredCallDescriptor(compilation_zone, call_descriptor)),
env_(env),
- min_size_(env_->module->initial_pages * wasm::kWasmPageSize),
- max_size_((env_->module->has_maximum_pages
- ? env_->module->maximum_pages
- : wasm::kV8MaxWasmMemoryPages) *
+ min_size_(uint64_t{env_->module->initial_pages} * wasm::kWasmPageSize),
+ max_size_(uint64_t{env_->module->has_maximum_pages
+ ? env_->module->maximum_pages
+ : wasm::kV8MaxWasmMemoryPages} *
wasm::kWasmPageSize),
runtime_exception_support_(runtime_exception_support),
source_position_table_builder_(source_position_table_builder),
protected_instructions_(protected_instructions),
compilation_zone_(compilation_zone),
codegen_zone_(codegen_zone),
- safepoint_table_builder_(compilation_zone_) {
- // Check for overflow in max_size_.
- DCHECK_EQ(max_size_, uint64_t{env_->module->has_maximum_pages
- ? env_->module->maximum_pages
- : wasm::kV8MaxWasmMemoryPages} *
- wasm::kWasmPageSize);
- }
+ safepoint_table_builder_(compilation_zone_) {}
+
+ ~LiftoffCompiler() { BindUnboundLabels(nullptr); }
bool ok() const { return ok_; }
@@ -142,6 +163,26 @@ class LiftoffCompiler {
BindUnboundLabels(decoder);
}
+ bool DidAssemblerBailout(Decoder* decoder) {
+ if (decoder->failed() || !asm_->did_bailout()) return false;
+ unsupported(decoder, asm_->bailout_reason());
+ return true;
+ }
+
+ bool CheckSupportedType(Decoder* decoder,
+ Vector<const ValueType> supported_types,
+ ValueType type, const char* context) {
+ char buffer[128];
+ // Check supported types.
+ for (ValueType supported : supported_types) {
+ if (type == supported) return true;
+ }
+ SNPrintF(ArrayVector(buffer), "%s %s", WasmOpcodes::TypeName(type),
+ context);
+ unsupported(decoder, buffer);
+ return false;
+ }
+
int GetSafepointTableOffset() const {
return safepoint_table_builder_.GetCodeOffset();
}
@@ -150,7 +191,8 @@ class LiftoffCompiler {
#ifdef DEBUG
// Bind all labels now, otherwise their destructor will fire a DCHECK error
// if they were referenced before.
- for (uint32_t i = 0, e = decoder->control_depth(); i < e; ++i) {
+ uint32_t control_depth = decoder ? decoder->control_depth() : 0;
+ for (uint32_t i = 0; i < control_depth; ++i) {
Control* c = decoder->control_at(i);
Label* label = c->label.get();
if (!label->is_bound()) __ bind(label);
@@ -165,14 +207,6 @@ class LiftoffCompiler {
#endif
}
- void CheckStackSizeLimit(Decoder* decoder) {
- DCHECK_GE(__ cache_state()->stack_height(), __ num_locals());
- int stack_height = __ cache_state()->stack_height() - __ num_locals();
- if (stack_height > LiftoffAssembler::kMaxValueStackHeight) {
- unsupported(decoder, "value stack grows too large");
- }
- }
-
void StartFunction(Decoder* decoder) {
int num_locals = decoder->NumLocals();
__ set_num_locals(num_locals);
@@ -181,37 +215,48 @@ class LiftoffCompiler {
}
}
- void ProcessParameter(uint32_t param_idx, uint32_t input_location) {
- ValueType type = __ local_type(param_idx);
- RegClass rc = reg_class_for(type);
- compiler::LinkageLocation param_loc =
- call_desc_->GetInputLocation(input_location);
- if (param_loc.IsRegister()) {
- DCHECK(!param_loc.IsAnyRegister());
- int reg_code = param_loc.AsRegister();
- LiftoffRegister reg =
- rc == kGpReg ? LiftoffRegister(Register::from_code(reg_code))
- : LiftoffRegister(DoubleRegister::from_code(reg_code));
- LiftoffRegList cache_regs =
- rc == kGpReg ? kGpCacheRegList : kFpCacheRegList;
- if (cache_regs.has(reg)) {
- // This is a cache register, just use it.
- __ PushRegister(type, reg);
- return;
+ // Returns the number of inputs processed (1 or 2).
+ uint32_t ProcessParameter(ValueType type, uint32_t input_idx) {
+ const int num_lowered_params = 1 + (kNeedI64RegPair && type == kWasmI64);
+ // Initialize to anything, will be set in the loop and used afterwards.
+ LiftoffRegister reg = LiftoffRegister::from_code(kGpReg, 0);
+ RegClass rc = num_lowered_params == 1 ? reg_class_for(type) : kGpReg;
+ LiftoffRegList pinned;
+ for (int pair_idx = 0; pair_idx < num_lowered_params; ++pair_idx) {
+ compiler::LinkageLocation param_loc =
+ descriptor_->GetInputLocation(input_idx + pair_idx);
+ // Initialize to anything, will be set in both arms of the if.
+ LiftoffRegister in_reg = LiftoffRegister::from_code(kGpReg, 0);
+ if (param_loc.IsRegister()) {
+ DCHECK(!param_loc.IsAnyRegister());
+ int reg_code = param_loc.AsRegister();
+ RegList cache_regs = rc == kGpReg ? kLiftoffAssemblerGpCacheRegs
+ : kLiftoffAssemblerFpCacheRegs;
+ if (cache_regs & (1 << reg_code)) {
+ // This is a cache register, just use it.
+ in_reg = LiftoffRegister::from_code(rc, reg_code);
+ } else {
+ // Move to a cache register (spill one if necessary).
+ // Note that we cannot create a {LiftoffRegister} for reg_code, since
+ // {LiftoffRegister} can only store cache regs.
+ LiftoffRegister in_reg = __ GetUnusedRegister(rc, pinned);
+ if (rc == kGpReg) {
+ __ Move(in_reg.gp(), Register::from_code(reg_code), type);
+ } else {
+ __ Move(in_reg.fp(), DoubleRegister::from_code(reg_code), type);
+ }
+ }
+ } else if (param_loc.IsCallerFrameSlot()) {
+ in_reg = __ GetUnusedRegister(rc, pinned);
+ ValueType lowered_type = num_lowered_params == 1 ? type : kWasmI32;
+ __ LoadCallerFrameSlot(in_reg, -param_loc.AsCallerFrameSlot(),
+ lowered_type);
}
- // Move to a cache register.
- LiftoffRegister cache_reg = __ GetUnusedRegister(rc);
- __ Move(cache_reg, reg);
- __ PushRegister(type, reg);
- return;
+ reg = pair_idx == 0 ? in_reg : LiftoffRegister::ForPair(reg, in_reg);
+ pinned.set(reg);
}
- if (param_loc.IsCallerFrameSlot()) {
- LiftoffRegister tmp_reg = __ GetUnusedRegister(rc);
- __ LoadCallerFrameSlot(tmp_reg, -param_loc.AsCallerFrameSlot());
- __ PushRegister(type, tmp_reg);
- return;
- }
- UNREACHABLE();
+ __ PushRegister(type, reg);
+ return num_lowered_params;
}
void StackCheck(wasm::WasmCodePosition position) {
@@ -220,69 +265,65 @@ class LiftoffCompiler {
OutOfLineCode::StackCheck(position, __ cache_state()->used_registers));
OutOfLineCode& ool = out_of_line_code_.back();
__ StackCheck(ool.label.get());
- __ bind(ool.continuation.get());
+ if (ool.continuation) __ bind(ool.continuation.get());
}
void StartFunctionBody(Decoder* decoder, Control* block) {
- if (!kLiftoffAssemblerImplementedOnThisPlatform) {
- unsupported(decoder, "platform");
- return;
- }
__ EnterFrame(StackFrame::WASM_COMPILED);
__ set_has_frame(true);
- __ ReserveStackSpace(LiftoffAssembler::kStackSlotSize *
- __ GetTotalFrameSlotCount());
+ pc_offset_stack_frame_construction_ = __ PrepareStackFrame();
+ // {PrepareStackFrame} is the first platform-specific assembler method.
+ // If this failed, we can bail out immediately, avoiding runtime overhead
+ // and potential failures because of other unimplemented methods.
+ // A platform implementing {PrepareStackFrame} must ensure that we can
+ // finish compilation without errors even if we hit unimplemented
+ // LiftoffAssembler methods.
+ if (DidAssemblerBailout(decoder)) return;
// Parameter 0 is the wasm context.
uint32_t num_params =
- static_cast<uint32_t>(call_desc_->ParameterCount()) - 1;
+ static_cast<uint32_t>(decoder->sig_->parameter_count());
for (uint32_t i = 0; i < __ num_locals(); ++i) {
- switch (__ local_type(i)) {
- case kWasmI32:
- case kWasmF32:
- // supported.
- break;
- case kWasmI64:
- unsupported(decoder, "i64 param/local");
- return;
- case kWasmF64:
- unsupported(decoder, "f64 param/local");
- return;
- default:
- unsupported(decoder, "exotic param/local");
- return;
- }
+ if (!CheckSupportedType(decoder, kTypes_ilfd, __ local_type(i), "param"))
+ return;
}
// Input 0 is the call target, the context is at 1.
constexpr int kContextParameterIndex = 1;
// Store the context parameter to a special stack slot.
compiler::LinkageLocation context_loc =
- call_desc_->GetInputLocation(kContextParameterIndex);
+ descriptor_->GetInputLocation(kContextParameterIndex);
DCHECK(context_loc.IsRegister());
DCHECK(!context_loc.IsAnyRegister());
Register context_reg = Register::from_code(context_loc.AsRegister());
__ SpillContext(context_reg);
- uint32_t param_idx = 0;
- for (; param_idx < num_params; ++param_idx) {
- constexpr int kFirstActualParameterIndex = kContextParameterIndex + 1;
- ProcessParameter(param_idx, param_idx + kFirstActualParameterIndex);
+ // Input 0 is the code target, 1 is the context. First parameter at 2.
+ uint32_t input_idx = kContextParameterIndex + 1;
+ for (uint32_t param_idx = 0; param_idx < num_params; ++param_idx) {
+ input_idx += ProcessParameter(__ local_type(param_idx), input_idx);
}
+ DCHECK_EQ(input_idx, descriptor_->InputCount());
// Set to a gp register, to mark this uninitialized.
LiftoffRegister zero_double_reg(Register::from_code<0>());
DCHECK(zero_double_reg.is_gp());
- for (; param_idx < __ num_locals(); ++param_idx) {
+ for (uint32_t param_idx = num_params; param_idx < __ num_locals();
+ ++param_idx) {
ValueType type = decoder->GetLocalType(param_idx);
switch (type) {
case kWasmI32:
__ cache_state()->stack_state.emplace_back(kWasmI32, uint32_t{0});
break;
+ case kWasmI64:
+ __ cache_state()->stack_state.emplace_back(kWasmI64, uint32_t{0});
+ break;
case kWasmF32:
+ case kWasmF64:
if (zero_double_reg.is_gp()) {
// Note: This might spill one of the registers used to hold
// parameters.
zero_double_reg = __ GetUnusedRegister(kFpReg);
- __ LoadConstant(zero_double_reg, WasmValue(0.f));
+ // Zero is represented by the bit pattern 0 for both f32 and f64.
+ __ LoadConstant(zero_double_reg, WasmValue(0.));
}
- __ PushRegister(kWasmF32, zero_double_reg);
+ __ PushRegister(type, zero_double_reg);
break;
default:
UNIMPLEMENTED();
@@ -294,9 +335,7 @@ class LiftoffCompiler {
// is never a position of any instruction in the function.
StackCheck(0);
- DCHECK_EQ(__ num_locals(), param_idx);
DCHECK_EQ(__ num_locals(), __ cache_state()->stack_height());
- CheckStackSizeLimit(decoder);
}
void GenerateOutOfLineCode(OutOfLineCode& ool) {
@@ -338,10 +377,13 @@ class LiftoffCompiler {
}
void FinishFunction(Decoder* decoder) {
+ if (DidAssemblerBailout(decoder)) return;
for (OutOfLineCode& ool : out_of_line_code_) {
GenerateOutOfLineCode(ool);
}
safepoint_table_builder_.Emit(asm_, __ GetTotalFrameSlotCount());
+ __ PatchPrepareStackFrame(pc_offset_stack_frame_construction_,
+ __ GetTotalFrameSlotCount());
}
void OnFirstError(Decoder* decoder) {
@@ -391,8 +433,8 @@ class LiftoffCompiler {
// Test the condition, jump to else if zero.
Register value = __ PopToRegister(kGpReg).gp();
- __ emit_i32_test(value);
- __ emit_cond_jump(kEqual, if_block->else_state->label.get());
+ __ emit_cond_jump(kEqual, if_block->else_state->label.get(), kWasmI32,
+ value);
if_block->label_state.stack_base = __ cache_state()->stack_height();
// Store the state (after popping the value) for executing the else branch.
@@ -433,14 +475,15 @@ class LiftoffCompiler {
DCHECK_LE(num_args, kMaxArgs);
MachineSignature sig(kNumReturns, num_args, kReps);
- compiler::CallDescriptor* desc =
+ auto call_descriptor =
compiler::Linkage::GetSimplifiedCDescriptor(compilation_zone_, &sig);
// Before making a call, spill all cache registers.
__ SpillAllRegisters();
// Store arguments on our stack, then align the stack for calling to C.
- uint32_t num_params = static_cast<uint32_t>(desc->ParameterCount());
+ uint32_t num_params =
+ static_cast<uint32_t>(call_descriptor->ParameterCount());
__ PrepareCCall(num_params, arg_regs);
// Set parameters (in sp[0], sp[8], ...).
@@ -449,7 +492,7 @@ class LiftoffCompiler {
constexpr size_t kInputShift = 1; // Input 0 is the call target.
compiler::LinkageLocation loc =
- desc->GetInputLocation(param + kInputShift);
+ call_descriptor->GetInputLocation(param + kInputShift);
if (loc.IsRegister()) {
Register reg = Register::from_code(loc.AsRegister());
// Load address of that parameter to the register.
@@ -465,126 +508,209 @@ class LiftoffCompiler {
__ CallC(ext_ref, num_params);
// Load return value.
- compiler::LinkageLocation return_loc = desc->GetReturnLocation(0);
+ compiler::LinkageLocation return_loc =
+ call_descriptor->GetReturnLocation(0);
DCHECK(return_loc.IsRegister());
Register return_reg = Register::from_code(return_loc.AsRegister());
if (return_reg != res_reg) {
- __ Move(LiftoffRegister(res_reg), LiftoffRegister(return_reg));
+ DCHECK_EQ(MachineRepresentation::kWord32,
+ sig.GetReturn(0).representation());
+ __ Move(LiftoffRegister(res_reg), LiftoffRegister(return_reg), kWasmI32);
}
}
- void I32UnOp(bool (LiftoffAssembler::*emit_fn)(Register, Register),
- ExternalReference (*fallback_fn)(Isolate*)) {
+ template <ValueType type, class EmitFn>
+ void EmitUnOp(EmitFn fn) {
+ static RegClass rc = reg_class_for(type);
LiftoffRegList pinned;
- LiftoffRegister dst_reg = pinned.set(__ GetUnaryOpTargetRegister(kGpReg));
- LiftoffRegister src_reg = pinned.set(__ PopToRegister(kGpReg, pinned));
- if (!emit_fn || !(asm_->*emit_fn)(dst_reg.gp(), src_reg.gp())) {
+ LiftoffRegister dst = pinned.set(__ GetUnaryOpTargetRegister(rc));
+ LiftoffRegister src = __ PopToRegister(rc, pinned);
+ fn(dst, src);
+ __ PushRegister(type, dst);
+ }
+
+ void EmitI32UnOpWithCFallback(bool (LiftoffAssembler::*emit_fn)(Register,
+ Register),
+ ExternalReference (*fallback_fn)(Isolate*)) {
+ auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) {
+ if (emit_fn && (asm_->*emit_fn)(dst.gp(), src.gp())) return;
ExternalReference ext_ref = fallback_fn(asm_->isolate());
- Register args[] = {src_reg.gp()};
- GenerateCCall(dst_reg.gp(), arraysize(args), args, ext_ref);
- }
- __ PushRegister(kWasmI32, dst_reg);
+ Register args[] = {src.gp()};
+ GenerateCCall(dst.gp(), arraysize(args), args, ext_ref);
+ };
+ EmitUnOp<kWasmI32>(emit_with_c_fallback);
}
void UnOp(Decoder* decoder, WasmOpcode opcode, FunctionSig*,
const Value& value, Value* result) {
-#define CASE_UNOP(opcode, type, fn, ext_ref_fn) \
- case WasmOpcode::kExpr##opcode: \
- type##UnOp(&LiftoffAssembler::emit_##fn, ext_ref_fn); \
+#define CASE_I32_UNOP(opcode, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ EmitUnOp<kWasmI32>([=](LiftoffRegister dst, LiftoffRegister src) { \
+ __ emit_##fn(dst.gp(), src.gp()); \
+ }); \
+ break;
+#define CASE_FLOAT_UNOP(opcode, type, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ EmitUnOp<kWasm##type>([=](LiftoffRegister dst, LiftoffRegister src) { \
+ __ emit_##fn(dst.fp(), src.fp()); \
+ }); \
break;
switch (opcode) {
- CASE_UNOP(I32Eqz, I32, i32_eqz, nullptr)
- CASE_UNOP(I32Clz, I32, i32_clz, nullptr)
- CASE_UNOP(I32Ctz, I32, i32_ctz, nullptr)
- CASE_UNOP(I32Popcnt, I32, i32_popcnt,
- &ExternalReference::wasm_word32_popcnt)
+ CASE_I32_UNOP(I32Clz, i32_clz)
+ CASE_I32_UNOP(I32Ctz, i32_ctz)
+ case kExprI32Popcnt:
+ EmitI32UnOpWithCFallback(&LiftoffAssembler::emit_i32_popcnt,
+ &ExternalReference::wasm_word32_popcnt);
+ break;
+ case kExprI32Eqz:
+ EmitUnOp<kWasmI32>([=](LiftoffRegister dst, LiftoffRegister src) {
+ __ emit_i32_set_cond(kEqual, dst.gp(), src.gp());
+ });
+ break;
+ CASE_FLOAT_UNOP(F32Neg, F32, f32_neg)
+ CASE_FLOAT_UNOP(F64Neg, F64, f64_neg)
default:
return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
}
-#undef CASE_UNOP
+#undef CASE_I32_UNOP
+#undef CASE_FLOAT_UNOP
}
- void I32BinOp(void (LiftoffAssembler::*emit_fn)(Register, Register,
- Register)) {
+ template <ValueType type, typename EmitFn>
+ void EmitMonomorphicBinOp(EmitFn fn) {
+ static constexpr RegClass rc = reg_class_for(type);
LiftoffRegList pinned;
- LiftoffRegister dst_reg = pinned.set(__ GetBinaryOpTargetRegister(kGpReg));
- LiftoffRegister rhs_reg = pinned.set(__ PopToRegister(kGpReg, pinned));
- LiftoffRegister lhs_reg = __ PopToRegister(kGpReg, pinned);
- (asm_->*emit_fn)(dst_reg.gp(), lhs_reg.gp(), rhs_reg.gp());
- __ PushRegister(kWasmI32, dst_reg);
+ LiftoffRegister dst = pinned.set(__ GetBinaryOpTargetRegister(rc));
+ LiftoffRegister rhs = pinned.set(__ PopToRegister(rc, pinned));
+ LiftoffRegister lhs = __ PopToRegister(rc, pinned);
+ fn(dst, lhs, rhs);
+ __ PushRegister(type, dst);
}
- void I32CCallBinOp(ExternalReference ext_ref) {
+ template <ValueType result_type, RegClass src_rc, typename EmitFn>
+ void EmitBinOpWithDifferentResultType(EmitFn fn) {
LiftoffRegList pinned;
- LiftoffRegister dst_reg = pinned.set(__ GetBinaryOpTargetRegister(kGpReg));
- LiftoffRegister rhs_reg = pinned.set(__ PopToRegister(kGpReg, pinned));
- LiftoffRegister lhs_reg = __ PopToRegister(kGpReg, pinned);
- Register args[] = {lhs_reg.gp(), rhs_reg.gp()};
- GenerateCCall(dst_reg.gp(), arraysize(args), args, ext_ref);
- __ PushRegister(kWasmI32, dst_reg);
- }
-
- void F32BinOp(void (LiftoffAssembler::*emit_fn)(DoubleRegister,
- DoubleRegister,
- DoubleRegister)) {
- LiftoffRegList pinned;
- LiftoffRegister target_reg =
- pinned.set(__ GetBinaryOpTargetRegister(kFpReg));
- LiftoffRegister rhs_reg = pinned.set(__ PopToRegister(kFpReg, pinned));
- LiftoffRegister lhs_reg = __ PopToRegister(kFpReg, pinned);
- (asm_->*emit_fn)(target_reg.fp(), lhs_reg.fp(), rhs_reg.fp());
- __ PushRegister(kWasmF32, target_reg);
+ LiftoffRegister rhs = pinned.set(__ PopToRegister(src_rc, pinned));
+ LiftoffRegister lhs = pinned.set(__ PopToRegister(src_rc, pinned));
+ LiftoffRegister dst = __ GetUnusedRegister(reg_class_for(result_type));
+ fn(dst, lhs, rhs);
+ __ PushRegister(result_type, dst);
}
void BinOp(Decoder* decoder, WasmOpcode opcode, FunctionSig*,
const Value& lhs, const Value& rhs, Value* result) {
-#define CASE_BINOP(opcode, type, fn) \
- case WasmOpcode::kExpr##opcode: \
- return type##BinOp(&LiftoffAssembler::emit_##fn);
-#define CASE_CCALL_BINOP(opcode, type, ext_ref_fn) \
- case WasmOpcode::kExpr##opcode: \
- type##CCallBinOp(ExternalReference::ext_ref_fn(asm_->isolate())); \
- break;
+#define CASE_I32_BINOP(opcode, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ return EmitMonomorphicBinOp<kWasmI32>( \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ __ emit_##fn(dst.gp(), lhs.gp(), rhs.gp()); \
+ });
+#define CASE_FLOAT_BINOP(opcode, type, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ return EmitMonomorphicBinOp<kWasm##type>( \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ __ emit_##fn(dst.fp(), lhs.fp(), rhs.fp()); \
+ });
+#define CASE_I32_CMPOP(opcode, cond) \
+ case WasmOpcode::kExpr##opcode: \
+ return EmitMonomorphicBinOp<kWasmI32>( \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ __ emit_i32_set_cond(cond, dst.gp(), lhs.gp(), rhs.gp()); \
+ });
+#define CASE_F32_CMPOP(opcode, cond) \
+ case WasmOpcode::kExpr##opcode: \
+ return EmitBinOpWithDifferentResultType<kWasmI32, kFpReg>( \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ __ emit_f32_set_cond(cond, dst.gp(), lhs.fp(), rhs.fp()); \
+ });
+#define CASE_SHIFTOP(opcode, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ return EmitMonomorphicBinOp<kWasmI32>( \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ __ emit_##fn(dst.gp(), lhs.gp(), rhs.gp(), {}); \
+ });
+#define CASE_CCALL_BINOP(opcode, type, ext_ref_fn) \
+ case WasmOpcode::kExpr##opcode: \
+ return EmitMonomorphicBinOp<kWasmI32>( \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ Register args[] = {lhs.gp(), rhs.gp()}; \
+ auto ext_ref = ExternalReference::ext_ref_fn(__ isolate()); \
+ GenerateCCall(dst.gp(), arraysize(args), args, ext_ref); \
+ });
switch (opcode) {
- CASE_BINOP(I32Add, I32, i32_add)
- CASE_BINOP(I32Sub, I32, i32_sub)
- CASE_BINOP(I32Mul, I32, i32_mul)
- CASE_BINOP(I32And, I32, i32_and)
- CASE_BINOP(I32Ior, I32, i32_or)
- CASE_BINOP(I32Xor, I32, i32_xor)
- CASE_BINOP(I32Shl, I32, i32_shl)
- CASE_BINOP(I32ShrS, I32, i32_sar)
- CASE_BINOP(I32ShrU, I32, i32_shr)
+ CASE_I32_BINOP(I32Add, i32_add)
+ CASE_I32_BINOP(I32Sub, i32_sub)
+ CASE_I32_BINOP(I32Mul, i32_mul)
+ CASE_I32_BINOP(I32And, i32_and)
+ CASE_I32_BINOP(I32Ior, i32_or)
+ CASE_I32_BINOP(I32Xor, i32_xor)
+ CASE_I32_CMPOP(I32Eq, kEqual)
+ CASE_I32_CMPOP(I32Ne, kUnequal)
+ CASE_I32_CMPOP(I32LtS, kSignedLessThan)
+ CASE_I32_CMPOP(I32LtU, kUnsignedLessThan)
+ CASE_I32_CMPOP(I32GtS, kSignedGreaterThan)
+ CASE_I32_CMPOP(I32GtU, kUnsignedGreaterThan)
+ CASE_I32_CMPOP(I32LeS, kSignedLessEqual)
+ CASE_I32_CMPOP(I32LeU, kUnsignedLessEqual)
+ CASE_I32_CMPOP(I32GeS, kSignedGreaterEqual)
+ CASE_I32_CMPOP(I32GeU, kUnsignedGreaterEqual)
+ CASE_F32_CMPOP(F32Eq, kEqual)
+ CASE_F32_CMPOP(F32Ne, kUnequal)
+ CASE_F32_CMPOP(F32Lt, kUnsignedLessThan)
+ CASE_F32_CMPOP(F32Gt, kUnsignedGreaterThan)
+ CASE_F32_CMPOP(F32Le, kUnsignedLessEqual)
+ CASE_F32_CMPOP(F32Ge, kUnsignedGreaterEqual)
+ CASE_SHIFTOP(I32Shl, i32_shl)
+ CASE_SHIFTOP(I32ShrS, i32_sar)
+ CASE_SHIFTOP(I32ShrU, i32_shr)
CASE_CCALL_BINOP(I32Rol, I32, wasm_word32_rol)
CASE_CCALL_BINOP(I32Ror, I32, wasm_word32_ror)
- CASE_BINOP(F32Add, F32, f32_add)
- CASE_BINOP(F32Sub, F32, f32_sub)
- CASE_BINOP(F32Mul, F32, f32_mul)
+ CASE_FLOAT_BINOP(F32Add, F32, f32_add)
+ CASE_FLOAT_BINOP(F32Sub, F32, f32_sub)
+ CASE_FLOAT_BINOP(F32Mul, F32, f32_mul)
+ CASE_FLOAT_BINOP(F64Add, F64, f64_add)
+ CASE_FLOAT_BINOP(F64Sub, F64, f64_sub)
+ CASE_FLOAT_BINOP(F64Mul, F64, f64_mul)
default:
return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
}
-#undef CASE_BINOP
+#undef CASE_I32_BINOP
+#undef CASE_FLOAT_BINOP
+#undef CASE_I32_CMPOP
+#undef CASE_F32_CMPOP
+#undef CASE_SHIFTOP
#undef CASE_CCALL_BINOP
}
void I32Const(Decoder* decoder, Value* result, int32_t value) {
__ cache_state()->stack_state.emplace_back(kWasmI32, value);
- CheckStackSizeLimit(decoder);
}
void I64Const(Decoder* decoder, Value* result, int64_t value) {
- unsupported(decoder, "i64.const");
+ // The {VarState} stores constant values as int32_t, thus we only store
+ // 64-bit constants in this field if they fit in an int32_t. Larger values
+ // cannot be used as immediate values anyway, so we can also just put them
+ // in a register immediately.
+ int32_t value_i32 = static_cast<int32_t>(value);
+ if (value_i32 == value) {
+ __ cache_state()->stack_state.emplace_back(kWasmI64, value_i32);
+ } else {
+ LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kWasmI64));
+ __ LoadConstant(reg, WasmValue(value));
+ __ PushRegister(kWasmI64, reg);
+ }
}
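
The {value_i32 == value} test checks whether the 64-bit constant survives a round trip through int32_t, i.e. whether its upper bits are just the sign extension of the low word; a minimal illustration ({FitsInInt32} is a hypothetical helper):

#include <cstdint>
#include <cstdio>

// Sketch only: a 64-bit constant can live in the int32_t constant slot of
// {VarState} iff truncating and sign-extending it reproduces the original.
bool FitsInInt32(int64_t value) {
  return static_cast<int32_t>(value) == value;
}

int main() {
  std::printf("%d\n", FitsInInt32(-1));                // 1: stored as constant
  std::printf("%d\n", FitsInInt32(int64_t{1} << 40));  // 0: goes to a register
}
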
void F32Const(Decoder* decoder, Value* result, float value) {
LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmF32, reg);
- CheckStackSizeLimit(decoder);
}
void F64Const(Decoder* decoder, Value* result, double value) {
- unsupported(decoder, "f64.const");
+ LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
+ __ LoadConstant(reg, WasmValue(value));
+ __ PushRegister(kWasmF64, reg);
}
void Drop(Decoder* decoder, const Value& value) {
@@ -603,11 +729,11 @@ class LiftoffCompiler {
if (values.size() > 1) return unsupported(decoder, "multi-return");
RegClass rc = reg_class_for(values[0].type);
LiftoffRegister reg = __ PopToRegister(rc);
- __ MoveToReturnRegister(reg);
+ __ MoveToReturnRegister(reg, values[0].type);
}
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ DropStackSlotsAndRet(
- static_cast<uint32_t>(call_desc_->StackParameterCount()));
+ static_cast<uint32_t>(descriptor_->StackParameterCount()));
}
void GetLocal(Decoder* decoder, Value* result,
@@ -618,37 +744,36 @@ class LiftoffCompiler {
case kRegister:
__ PushRegister(slot.type(), slot.reg());
break;
- case kI32Const:
+ case KIntConst:
__ cache_state()->stack_state.emplace_back(operand.type,
slot.i32_const());
break;
case kStack: {
auto rc = reg_class_for(operand.type);
LiftoffRegister reg = __ GetUnusedRegister(rc);
- __ Fill(reg, operand.index);
+ __ Fill(reg, operand.index, operand.type);
__ PushRegister(slot.type(), reg);
break;
}
}
- CheckStackSizeLimit(decoder);
}
void SetLocalFromStackSlot(LiftoffAssembler::VarState& dst_slot,
uint32_t local_index) {
auto& state = *__ cache_state();
+ ValueType type = dst_slot.type();
if (dst_slot.is_reg()) {
LiftoffRegister slot_reg = dst_slot.reg();
if (state.get_use_count(slot_reg) == 1) {
- __ Fill(dst_slot.reg(), state.stack_height() - 1);
+ __ Fill(dst_slot.reg(), state.stack_height() - 1, type);
return;
}
state.dec_used(slot_reg);
}
- ValueType type = dst_slot.type();
DCHECK_EQ(type, __ local_type(local_index));
RegClass rc = reg_class_for(type);
LiftoffRegister dst_reg = __ GetUnusedRegister(rc);
- __ Fill(dst_reg, __ cache_state()->stack_height() - 1);
+ __ Fill(dst_reg, __ cache_state()->stack_height() - 1, type);
dst_slot = LiftoffAssembler::VarState(type, dst_reg);
__ cache_state()->inc_used(dst_reg);
}
@@ -663,7 +788,7 @@ class LiftoffCompiler {
target_slot = source_slot;
if (is_tee) state.inc_used(target_slot.reg());
break;
- case kI32Const:
+ case KIntConst:
__ DropStackSlot(&target_slot);
target_slot = source_slot;
break;
@@ -701,7 +826,6 @@ class LiftoffCompiler {
return unsupported(decoder, "global > kPointerSize");
__ Load(value, addr, no_reg, global->offset, type, pinned);
__ PushRegister(global->type, value);
- CheckStackSizeLimit(decoder);
}
void SetGlobal(Decoder* decoder, const Value& value,
@@ -742,16 +866,76 @@ class LiftoffCompiler {
void BrIf(Decoder* decoder, const Value& cond, Control* target) {
Label cont_false;
Register value = __ PopToRegister(kGpReg).gp();
- __ emit_i32_test(value);
- __ emit_cond_jump(kEqual, &cont_false);
+ __ emit_cond_jump(kEqual, &cont_false, kWasmI32, value);
Br(target);
__ bind(&cont_false);
}
+ // Generate a branch table case, potentially reusing previously generated
+ // stack transfer code.
+ void GenerateBrCase(Decoder* decoder, uint32_t br_depth,
+ std::map<uint32_t, MovableLabel>& br_targets) {
+ MovableLabel& label = br_targets[br_depth];
+ if (label.get()->is_bound()) {
+ __ jmp(label.get());
+ } else {
+ __ bind(label.get());
+ Br(decoder->control_at(br_depth));
+ }
+ }
+
+ // Generate a branch table for input in [min, max).
+ // TODO(wasm): Generate a real branch table (like TF TableSwitch).
+ void GenerateBrTable(Decoder* decoder, LiftoffRegister tmp,
+ LiftoffRegister value, uint32_t min, uint32_t max,
+ BranchTableIterator<validate>& table_iterator,
+ std::map<uint32_t, MovableLabel>& br_targets) {
+ DCHECK_LT(min, max);
+ // Check base case.
+ if (max == min + 1) {
+ DCHECK_EQ(min, table_iterator.cur_index());
+ GenerateBrCase(decoder, table_iterator.next(), br_targets);
+ return;
+ }
+
+ uint32_t split = min + (max - min) / 2;
+ Label upper_half;
+ __ LoadConstant(tmp, WasmValue(split));
+ __ emit_cond_jump(kUnsignedGreaterEqual, &upper_half, kWasmI32, value.gp(),
+ tmp.gp());
+ // Emit br table for lower half:
+ GenerateBrTable(decoder, tmp, value, min, split, table_iterator,
+ br_targets);
+ __ bind(&upper_half);
+ // Emit br table for upper half:
+ GenerateBrTable(decoder, tmp, value, split, max, table_iterator,
+ br_targets);
+ }
+
void BrTable(Decoder* decoder, const BranchTableOperand<validate>& operand,
const Value& key) {
- unsupported(decoder, "br_table");
+ LiftoffRegList pinned;
+ LiftoffRegister value = pinned.set(__ PopToRegister(kGpReg));
+ BranchTableIterator<validate> table_iterator(decoder, operand);
+ std::map<uint32_t, MovableLabel> br_targets;
+
+ if (operand.table_count > 0) {
+ LiftoffRegister tmp = __ GetUnusedRegister(kGpReg, pinned);
+ __ LoadConstant(tmp, WasmValue(uint32_t{operand.table_count}));
+ Label case_default;
+ __ emit_cond_jump(kUnsignedGreaterEqual, &case_default, kWasmI32,
+ value.gp(), tmp.gp());
+
+ GenerateBrTable(decoder, tmp, value, 0, operand.table_count,
+ table_iterator, br_targets);
+
+ __ bind(&case_default);
+ }
+
+ // Generate the default case.
+ GenerateBrCase(decoder, table_iterator.next(), br_targets);
+ DCHECK(!table_iterator.has_next());
}
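
  Editorial sketch (not part of the patch): the br_table lowering above emits a binary search over the case indices rather than a jump table, binding one label per branch depth so that stack-transfer code is shared. A minimal standalone illustration of the same dispatch strategy, with a hypothetical cases vector standing in for the generated branches:

  #include <cstdint>
  #include <functional>
  #include <vector>

  // Dispatches `key` over the cases in [min, max) by recursive halving, the
  // same shape as GenerateBrTable above. The caller handles key >= max (the
  // default case) before calling this.
  void BrTableDispatch(uint32_t key, uint32_t min, uint32_t max,
                       const std::vector<std::function<void()>>& cases) {
    if (max == min + 1) {  // Base case: exactly one entry left.
      cases[min]();
      return;
    }
    uint32_t split = min + (max - min) / 2;
    if (key < split) {
      BrTableDispatch(key, min, split, cases);  // Lower half.
    } else {
      BrTableDispatch(key, split, max, cases);  // Upper half.
    }
  }
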
void Else(Decoder* decoder, Control* if_block) {
@@ -760,28 +944,45 @@ class LiftoffCompiler {
__ cache_state()->Steal(if_block->else_state->state);
}
- Label* AddOutOfLineTrap(wasm::WasmCodePosition position, uint32_t pc = 0) {
+ Label* AddOutOfLineTrap(wasm::WasmCodePosition position,
+ Builtins::Name builtin, uint32_t pc = 0) {
DCHECK(!FLAG_wasm_no_bounds_checks);
- // The pc is needed exactly if trap handlers are enabled.
- DCHECK_EQ(pc != 0, env_->use_trap_handler);
+ // The pc is only needed for the memory OOB trap when the trap handler is
+ // enabled. Other callers should not even compute it.
+ DCHECK_EQ(pc != 0, builtin == Builtins::kThrowWasmTrapMemOutOfBounds &&
+ env_->use_trap_handler);
- out_of_line_code_.push_back(OutOfLineCode::Trap(
- Builtins::kThrowWasmTrapMemOutOfBounds, position, pc));
+ out_of_line_code_.push_back(OutOfLineCode::Trap(builtin, position, pc));
return out_of_line_code_.back().label.get();
}
- void BoundsCheckMem(uint32_t access_size, uint32_t offset, Register index,
- wasm::WasmCodePosition position, LiftoffRegList pinned) {
- DCHECK(!env_->use_trap_handler);
- if (FLAG_wasm_no_bounds_checks) return;
+ // Returns true if the memory access is statically known to be out of bounds
+ // (in which case a jump to the trap was generated); returns false otherwise.
+ bool BoundsCheckMem(Decoder* decoder, uint32_t access_size, uint32_t offset,
+ Register index, LiftoffRegList pinned) {
+ const bool statically_oob =
+ access_size > max_size_ || offset > max_size_ - access_size;
+
+ if (!statically_oob &&
+ (FLAG_wasm_no_bounds_checks || env_->use_trap_handler)) {
+ return false;
+ }
- Label* trap_label = AddOutOfLineTrap(position);
+ Label* trap_label = AddOutOfLineTrap(
+ decoder->position(), Builtins::kThrowWasmTrapMemOutOfBounds);
- if (access_size > max_size_ || offset > max_size_ - access_size) {
- // The access will be out of bounds, even for the largest memory.
+ if (statically_oob) {
__ emit_jump(trap_label);
- return;
+ Control* current_block = decoder->control_at(0);
+ if (current_block->reachable()) {
+ current_block->reachability = kSpecOnlyReachable;
+ }
+ return true;
}
+
+ DCHECK(!env_->use_trap_handler);
+ DCHECK(!FLAG_wasm_no_bounds_checks);
+
uint32_t end_offset = offset + access_size - 1;
// If the end offset is larger than the smallest memory, dynamically check
@@ -793,8 +994,8 @@ class LiftoffCompiler {
__ LoadFromContext(mem_size.gp(), offsetof(WasmContext, mem_size), 4);
__ LoadConstant(end_offset_reg, WasmValue(end_offset));
if (end_offset >= min_size_) {
- __ emit_i32_compare(end_offset_reg.gp(), mem_size.gp());
- __ emit_cond_jump(kUnsignedGreaterEqual, trap_label);
+ __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kWasmI32,
+ end_offset_reg.gp(), mem_size.gp());
}
// Just reuse the end_offset register for computing the effective size.
@@ -802,8 +1003,9 @@ class LiftoffCompiler {
__ emit_i32_sub(effective_size_reg.gp(), mem_size.gp(),
end_offset_reg.gp());
- __ emit_i32_compare(index, effective_size_reg.gp());
- __ emit_cond_jump(kUnsignedGreaterEqual, trap_label);
+ __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kWasmI32, index,
+ effective_size_reg.gp());
+ return false;
}
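
  Editorial sketch (not part of the patch): the rewritten BoundsCheckMem first decides statically whether the access can ever be in bounds and only falls back to the dynamic comparison against the current memory size otherwise. The same arithmetic on plain integers, as a hypothetical helper:

  #include <cstdint>

  // Mirrors the static check in BoundsCheckMem: true means the access is out
  // of bounds even for the largest possible memory, so only a trap is emitted.
  // Otherwise *end_offset receives the last accessed byte, which the generated
  // code compares against the current memory size at runtime.
  bool StaticallyOutOfBounds(uint64_t access_size, uint64_t offset,
                             uint64_t max_size, uint64_t* end_offset) {
    if (access_size > max_size || offset > max_size - access_size) return true;
    *end_offset = offset + access_size - 1;
    return false;
  }
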
void TraceMemoryOperation(bool is_store, MachineRepresentation rep,
@@ -842,22 +1044,23 @@ class LiftoffCompiler {
}
void GenerateRuntimeCall(int num_args, Register* args) {
- compiler::CallDescriptor* desc =
- compiler::Linkage::GetRuntimeCallDescriptor(
- compilation_zone_, Runtime::kWasmTraceMemory, num_args,
- compiler::Operator::kNoProperties,
- compiler::CallDescriptor::kNoFlags);
+ auto call_descriptor = compiler::Linkage::GetRuntimeCallDescriptor(
+ compilation_zone_, Runtime::kWasmTraceMemory, num_args,
+ compiler::Operator::kNoProperties, compiler::CallDescriptor::kNoFlags);
// Currently, only one argument is supported. More arguments require some
// caution for the parallel register moves (reuse StackTransferRecipe).
DCHECK_EQ(1, num_args);
constexpr size_t kInputShift = 1; // Input 0 is the call target.
- compiler::LinkageLocation param_loc = desc->GetInputLocation(kInputShift);
+ compiler::LinkageLocation param_loc =
+ call_descriptor->GetInputLocation(kInputShift);
if (param_loc.IsRegister()) {
Register reg = Register::from_code(param_loc.AsRegister());
- __ Move(LiftoffRegister(reg), LiftoffRegister(args[0]));
+ __ Move(LiftoffRegister(reg), LiftoffRegister(args[0]),
+ LiftoffAssembler::kWasmIntPtr);
} else {
DCHECK(param_loc.IsCallerFrameSlot());
- __ PushCallerFrameSlot(LiftoffRegister(args[0]));
+ __ PushCallerFrameSlot(LiftoffRegister(args[0]),
+ LiftoffAssembler::kWasmIntPtr);
}
// Allocate the codegen zone if not done before.
@@ -873,14 +1076,11 @@ class LiftoffCompiler {
const MemoryAccessOperand<validate>& operand,
const Value& index_val, Value* result) {
ValueType value_type = type.value_type();
- if (value_type != kWasmI32 && value_type != kWasmF32)
- return unsupported(decoder, "unsupported load type");
+ if (!CheckSupportedType(decoder, kTypes_ilfd, value_type, "load")) return;
LiftoffRegList pinned;
Register index = pinned.set(__ PopToRegister(kGpReg)).gp();
- if (!env_->use_trap_handler) {
- // Emit an explicit bounds check.
- BoundsCheckMem(type.size(), operand.offset, index, decoder->position(),
- pinned);
+ if (BoundsCheckMem(decoder, type.size(), operand.offset, index, pinned)) {
+ return;
}
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
__ LoadFromContext(addr, offsetof(WasmContext, mem_start), kPointerSize);
@@ -890,10 +1090,11 @@ class LiftoffCompiler {
__ Load(value, addr, index, operand.offset, type, pinned,
&protected_load_pc);
if (env_->use_trap_handler) {
- AddOutOfLineTrap(decoder->position(), protected_load_pc);
+ AddOutOfLineTrap(decoder->position(),
+ Builtins::kThrowWasmTrapMemOutOfBounds,
+ protected_load_pc);
}
__ PushRegister(value_type, value);
- CheckStackSizeLimit(decoder);
if (FLAG_wasm_trace_memory) {
TraceMemoryOperation(false, type.mem_type().representation(), index,
@@ -905,16 +1106,13 @@ class LiftoffCompiler {
const MemoryAccessOperand<validate>& operand,
const Value& index_val, const Value& value_val) {
ValueType value_type = type.value_type();
- if (value_type != kWasmI32 && value_type != kWasmF32)
- return unsupported(decoder, "unsupported store type");
+ if (!CheckSupportedType(decoder, kTypes_ilfd, value_type, "store")) return;
RegClass rc = reg_class_for(value_type);
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister(rc));
Register index = pinned.set(__ PopToRegister(kGpReg, pinned)).gp();
- if (!env_->use_trap_handler) {
- // Emit an explicit bounds check.
- BoundsCheckMem(type.size(), operand.offset, index, decoder->position(),
- pinned);
+ if (BoundsCheckMem(decoder, type.size(), operand.offset, index, pinned)) {
+ return;
}
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
__ LoadFromContext(addr, offsetof(WasmContext, mem_start), kPointerSize);
@@ -922,7 +1120,9 @@ class LiftoffCompiler {
__ Store(addr, index, operand.offset, value, type, pinned,
&protected_store_pc);
if (env_->use_trap_handler) {
- AddOutOfLineTrap(decoder->position(), protected_store_pc);
+ AddOutOfLineTrap(decoder->position(),
+ Builtins::kThrowWasmTrapMemOutOfBounds,
+ protected_store_pc);
}
if (FLAG_wasm_trace_memory) {
TraceMemoryOperation(true, type.mem_rep(), index, operand.offset,
@@ -942,11 +1142,17 @@ class LiftoffCompiler {
const Value args[], Value returns[]) {
if (operand.sig->return_count() > 1)
return unsupported(decoder, "multi-return");
+ if (operand.sig->return_count() == 1 &&
+ !CheckSupportedType(decoder, kTypes_ilfd, operand.sig->GetReturn(0),
+ "return"))
+ return;
- compiler::CallDescriptor* call_desc =
+ auto call_descriptor =
compiler::GetWasmCallDescriptor(compilation_zone_, operand.sig);
+ call_descriptor =
+ GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
- __ PrepareCall(operand.sig, call_desc);
+ __ PrepareCall(operand.sig, call_descriptor);
source_position_table_builder_->AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), false);
@@ -965,14 +1171,166 @@ class LiftoffCompiler {
safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
- __ FinishCall(operand.sig, call_desc);
+ __ FinishCall(operand.sig, call_descriptor);
}
- void CallIndirect(Decoder* decoder, const Value& index,
+ void CallIndirect(Decoder* decoder, const Value& index_val,
const CallIndirectOperand<validate>& operand,
const Value args[], Value returns[]) {
- unsupported(decoder, "call_indirect");
+ if (operand.sig->return_count() > 1) {
+ return unsupported(decoder, "multi-return");
+ }
+ if (operand.sig->return_count() == 1 &&
+ !CheckSupportedType(decoder, kTypes_ilfd, operand.sig->GetReturn(0),
+ "return")) {
+ return;
+ }
+
+ // Assume only one table for now.
+ uint32_t table_index = 0;
+
+ // Pop the index.
+ LiftoffRegister index = __ PopToRegister(kGpReg);
+ // If that register is still used after popping, move the index into another
+ // register, since we are about to modify it.
+ if (__ cache_state()->is_used(index)) {
+ LiftoffRegister new_index =
+ __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(index));
+ __ Move(new_index, index, kWasmI32);
+ index = new_index;
+ }
+
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
+ // Get three temporary registers.
+ LiftoffRegister table = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LiftoffRegister tmp_const =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LiftoffRegister scratch = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+
+ LiftoffRegister* explicit_context = nullptr;
+
+ // Bounds check against the table size.
+ Label* invalid_func_label = AddOutOfLineTrap(
+ decoder->position(), Builtins::kThrowWasmTrapFuncInvalid);
+
+ static constexpr LoadType kPointerLoadType =
+ kPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load;
+ static constexpr int kFixedArrayOffset =
+ FixedArray::kHeaderSize - kHeapObjectTag;
+
+ uint32_t canonical_sig_num = env_->module->signature_ids[operand.sig_index];
+ DCHECK_GE(canonical_sig_num, 0);
+ DCHECK_GE(kMaxInt, canonical_sig_num);
+
+ if (WASM_CONTEXT_TABLES) {
+ // Compare against table size stored in {wasm_context->table_size}.
+ __ LoadFromContext(tmp_const.gp(), offsetof(WasmContext, table_size),
+ sizeof(uint32_t));
+ __ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kWasmI32,
+ index.gp(), tmp_const.gp());
+ // Load the table from {wasm_context->table}
+ __ LoadFromContext(table.gp(), offsetof(WasmContext, table),
+ kPointerSize);
+ // Load the signature from {wasm_context->table[$index].sig_id}
+ // == wasm_context.table + $index * #sizeof(IndirectFunctionTableEntry)
+ // + #offsetof(sig_id)
+ __ LoadConstant(
+ tmp_const,
+ WasmValue(static_cast<uint32_t>(sizeof(IndirectFunctionTableEntry))));
+ __ emit_i32_mul(index.gp(), index.gp(), tmp_const.gp());
+ __ Load(scratch, table.gp(), index.gp(),
+ offsetof(IndirectFunctionTableEntry, sig_id), LoadType::kI32Load,
+ pinned);
+
+ __ LoadConstant(tmp_const, WasmValue(canonical_sig_num));
+
+ Label* sig_mismatch_label = AddOutOfLineTrap(
+ decoder->position(), Builtins::kThrowWasmTrapFuncSigMismatch);
+ __ emit_cond_jump(kUnequal, sig_mismatch_label,
+ LiftoffAssembler::kWasmIntPtr, scratch.gp(),
+ tmp_const.gp());
+
+ // Load the target address from {wasm_context->table[$index].target}
+ __ Load(scratch, table.gp(), index.gp(),
+ offsetof(IndirectFunctionTableEntry, target), kPointerLoadType,
+ pinned);
+
+ // Load the context from {wasm_context->table[$index].context}
+ // TODO(wasm): directly allocate the correct context register to avoid
+ // any potential moves.
+ __ Load(tmp_const, table.gp(), index.gp(),
+ offsetof(IndirectFunctionTableEntry, context), kPointerLoadType,
+ pinned);
+ explicit_context = &tmp_const;
+ } else {
+ // Compare against table size, which is a patchable constant.
+ uint32_t table_size =
+ env_->module->function_tables[table_index].initial_size;
+
+ __ LoadConstant(tmp_const, WasmValue(table_size),
+ RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE);
+
+ __ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kWasmI32,
+ index.gp(), tmp_const.gp());
+
+ wasm::GlobalHandleAddress function_table_handle_address =
+ env_->function_tables[table_index];
+ __ LoadConstant(table, WasmPtrValue(function_table_handle_address),
+ RelocInfo::WASM_GLOBAL_HANDLE);
+ __ Load(table, table.gp(), no_reg, 0, kPointerLoadType, pinned);
+
+ // Load signature from the table and check.
+ // The table is a FixedArray; signatures are encoded as SMIs.
+ // [sig1, code1, sig2, code2, sig3, code3, ...]
+ static_assert(compiler::kFunctionTableEntrySize == 2, "consistency");
+ static_assert(compiler::kFunctionTableSignatureOffset == 0,
+ "consistency");
+ static_assert(compiler::kFunctionTableCodeOffset == 1, "consistency");
+ __ LoadConstant(tmp_const, WasmValue(kPointerSizeLog2 + 1));
+ // Shift index such that it's the offset of the signature in the
+ // FixedArray.
+ __ emit_i32_shl(index.gp(), index.gp(), tmp_const.gp(), pinned);
+
+ // Load the signature.
+ __ Load(scratch, table.gp(), index.gp(), kFixedArrayOffset,
+ kPointerLoadType, pinned);
+
+ __ LoadConstant(tmp_const, WasmPtrValue(Smi::FromInt(canonical_sig_num)));
+
+ Label* sig_mismatch_label = AddOutOfLineTrap(
+ decoder->position(), Builtins::kThrowWasmTrapFuncSigMismatch);
+ __ emit_cond_jump(kUnequal, sig_mismatch_label,
+ LiftoffAssembler::kWasmIntPtr, scratch.gp(),
+ tmp_const.gp());
+
+ // Load code object.
+ __ Load(scratch, table.gp(), index.gp(), kFixedArrayOffset + kPointerSize,
+ kPointerLoadType, pinned);
+
+ // Move the pointer from the Code object to the instruction start.
+ __ LoadConstant(tmp_const,
+ WasmPtrValue(Code::kHeaderSize - kHeapObjectTag));
+ __ emit_ptrsize_add(scratch.gp(), scratch.gp(), tmp_const.gp());
+ }
+
+ source_position_table_builder_->AddPosition(
+ __ pc_offset(), SourcePosition(decoder->position()), false);
+
+ auto call_descriptor =
+ compiler::GetWasmCallDescriptor(compilation_zone_, operand.sig);
+ call_descriptor =
+ GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
+
+ Register target = scratch.gp();
+ __ PrepareCall(operand.sig, call_descriptor, &target, explicit_context);
+ __ CallIndirect(operand.sig, call_descriptor, target);
+
+ safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+
+ __ FinishCall(operand.sig, call_descriptor);
}
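
  Editorial sketch (not part of the patch): the indirect-call path above bounds-checks the table index, compares the canonical signature id of the entry against the expected one, and only then loads the target and context. The runtime check, with simplified structs whose field names are chosen for the sketch rather than taken from V8's layout:

  #include <cstdint>

  struct Entry {
    uint32_t sig_id;
    void* context;
    void* target;
  };

  // Returns the call target for `index`, or nullptr if the call must trap:
  // index out of range (TrapFuncInvalid) or signature mismatch
  // (TrapFuncSigMismatch).
  void* ResolveIndirectCall(const Entry* table, uint32_t table_size,
                            uint32_t index, uint32_t expected_sig_id) {
    if (index >= table_size) return nullptr;
    if (table[index].sig_id != expected_sig_id) return nullptr;
    return table[index].target;
  }
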
+
void SimdOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args,
Value* result) {
unsupported(decoder, "simd");
@@ -1009,11 +1367,11 @@ class LiftoffCompiler {
private:
LiftoffAssembler* const asm_;
- compiler::CallDescriptor* const call_desc_;
+ compiler::CallDescriptor* const descriptor_;
compiler::ModuleEnv* const env_;
// {min_size_} and {max_size_} are cached values computed from the ModuleEnv.
- const uint32_t min_size_;
- const uint32_t max_size_;
+ const uint64_t min_size_;
+ const uint64_t max_size_;
const compiler::RuntimeExceptionSupport runtime_exception_support_;
bool ok_ = true;
std::vector<OutOfLineCode> out_of_line_code_;
@@ -1027,6 +1385,9 @@ class LiftoffCompiler {
// code generation (in FinishCompilation).
std::unique_ptr<Zone>* codegen_zone_;
SafepointTableBuilder safepoint_table_builder_;
+ // The pc offset of the instructions that reserve the stack frame. Needed to
+ // patch the actually required stack size at the end of compilation.
+ uint32_t pc_offset_stack_frame_construction_ = 0;
void TraceCacheState(Decoder* decoder) const {
#ifdef DEBUG
@@ -1061,11 +1422,11 @@ bool compiler::WasmCompilationUnit::ExecuteLiftoffCompilation() {
Zone zone(isolate_->allocator(), "LiftoffCompilationZone");
const wasm::WasmModule* module = env_ ? env_->module : nullptr;
- auto* call_desc = compiler::GetWasmCallDescriptor(&zone, func_body_.sig);
+ auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, func_body_.sig);
base::Optional<TimedHistogramScope> liftoff_compile_time_scope(
base::in_place, counters()->liftoff_compile_time());
wasm::WasmFullDecoder<wasm::Decoder::kValidate, wasm::LiftoffCompiler>
- decoder(&zone, module, func_body_, &liftoff_.asm_, call_desc, env_,
+ decoder(&zone, module, func_body_, &liftoff_.asm_, call_descriptor, env_,
runtime_exception_support_,
&liftoff_.source_position_table_builder_,
protected_instructions_.get(), &zone, &liftoff_.codegen_zone_);
diff --git a/deps/v8/src/wasm/baseline/liftoff-register.h b/deps/v8/src/wasm/baseline/liftoff-register.h
index bb5ef5be4a..eedbf54a17 100644
--- a/deps/v8/src/wasm/baseline/liftoff-register.h
+++ b/deps/v8/src/wasm/baseline/liftoff-register.h
@@ -8,8 +8,6 @@
#include <iosfwd>
#include <memory>
-// Clients of this interface shouldn't depend on lots of compiler internals.
-// Do not include anything from src/compiler here!
#include "src/base/bits.h"
#include "src/wasm/baseline/liftoff-assembler-defs.h"
#include "src/wasm/wasm-opcodes.h"
@@ -18,24 +16,29 @@ namespace v8 {
namespace internal {
namespace wasm {
-enum RegClass { kNoReg, kGpReg, kFpReg };
+static constexpr bool kNeedI64RegPair = kPointerSize == 4;
+
+enum RegClass : uint8_t {
+ kGpReg,
+ kFpReg,
+ // {kGpRegPair} equals {kNoReg} if {kNeedI64RegPair} is false.
+ kGpRegPair,
+ kNoReg = kGpRegPair + kNeedI64RegPair
+};
+
+enum RegPairHalf : uint8_t { kLowWord, kHighWord };
// TODO(clemensh): Use a switch once we require C++14 support.
static inline constexpr RegClass reg_class_for(ValueType type) {
- return type == kWasmI32 || type == kWasmI64 // int types
- ? kGpReg
- : type == kWasmF32 || type == kWasmF64 // float types
- ? kFpReg
- : kNoReg; // other (unsupported) types
+ return kNeedI64RegPair && type == kWasmI64 // i64 on 32 bit
+ ? kGpRegPair
+ : type == kWasmI32 || type == kWasmI64 // int types
+ ? kGpReg
+ : type == kWasmF32 || type == kWasmF64 // float types
+ ? kFpReg
+ : kNoReg; // other (unsupported) types
}
-// RegForClass<rc>: Register for rc==kGpReg, DoubleRegister for rc==kFpReg, void
-// for all other values of rc.
-template <RegClass rc>
-using RegForClass = typename std::conditional<
- rc == kGpReg, Register,
- typename std::conditional<rc == kFpReg, DoubleRegister, void>::type>::type;
-
// Maximum code of a gp cache register.
static constexpr int kMaxGpRegCode =
8 * sizeof(kLiftoffAssemblerGpCacheRegs) -
@@ -47,14 +50,28 @@ static constexpr int kMaxFpRegCode =
// LiftoffRegister encodes both gp and fp in a unified index space.
// [0 .. kMaxGpRegCode] encodes gp registers,
// [kMaxGpRegCode+1 .. kMaxGpRegCode + kMaxFpRegCode] encodes fp registers.
+// I64 values on 32 bit platforms are stored in two registers, both encoded in
+// the same LiftoffRegister value.
static constexpr int kAfterMaxLiftoffGpRegCode = kMaxGpRegCode + 1;
static constexpr int kAfterMaxLiftoffFpRegCode =
kAfterMaxLiftoffGpRegCode + kMaxFpRegCode + 1;
static constexpr int kAfterMaxLiftoffRegCode = kAfterMaxLiftoffFpRegCode;
-static_assert(kAfterMaxLiftoffRegCode < 256,
- "liftoff register codes can be stored in one uint8_t");
+static constexpr int kBitsPerLiftoffRegCode =
+ 32 - base::bits::CountLeadingZeros<uint32_t>(kAfterMaxLiftoffRegCode - 1);
+static constexpr int kBitsPerGpRegCode =
+ 32 - base::bits::CountLeadingZeros<uint32_t>(kMaxGpRegCode);
+static constexpr int kBitsPerGpRegPair = 1 + 2 * kBitsPerGpRegCode;
class LiftoffRegister {
+ static constexpr int needed_bits =
+ Max(kNeedI64RegPair ? kBitsPerGpRegPair : 0, kBitsPerLiftoffRegCode);
+ using storage_t = std::conditional<
+ needed_bits <= 8, uint8_t,
+ std::conditional<needed_bits <= 16, uint16_t, uint32_t>::type>::type;
+ static_assert(8 * sizeof(storage_t) >= needed_bits &&
+ 8 * sizeof(storage_t) < 2 * needed_bits,
+ "right type has been chosen");
+
public:
explicit LiftoffRegister(Register reg) : LiftoffRegister(reg.code()) {
DCHECK_EQ(reg, gp());
@@ -67,6 +84,7 @@ class LiftoffRegister {
static LiftoffRegister from_liftoff_code(int code) {
DCHECK_LE(0, code);
DCHECK_GT(kAfterMaxLiftoffRegCode, code);
+ DCHECK_EQ(code, static_cast<storage_t>(code));
return LiftoffRegister(code);
}
@@ -81,12 +99,40 @@ class LiftoffRegister {
}
}
+ static LiftoffRegister ForPair(LiftoffRegister low, LiftoffRegister high) {
+ DCHECK(kNeedI64RegPair);
+ DCHECK_NE(low, high);
+ storage_t combined_code = low.gp().code() |
+ high.gp().code() << kBitsPerGpRegCode |
+ 1 << (2 * kBitsPerGpRegCode);
+ return LiftoffRegister(combined_code);
+ }
+
+ constexpr bool is_pair() const {
+ return kNeedI64RegPair && (code_ & (1 << (2 * kBitsPerGpRegCode))) != 0;
+ }
constexpr bool is_gp() const { return code_ < kAfterMaxLiftoffGpRegCode; }
constexpr bool is_fp() const {
return code_ >= kAfterMaxLiftoffGpRegCode &&
code_ < kAfterMaxLiftoffFpRegCode;
}
+ LiftoffRegister low() const { return LiftoffRegister(low_gp()); }
+
+ LiftoffRegister high() const { return LiftoffRegister(high_gp()); }
+
+ Register low_gp() const {
+ DCHECK(is_pair());
+ static constexpr storage_t kCodeMask = (1 << kBitsPerGpRegCode) - 1;
+ return Register::from_code(code_ & kCodeMask);
+ }
+
+ Register high_gp() const {
+ DCHECK(is_pair());
+ static constexpr storage_t kCodeMask = (1 << kBitsPerGpRegCode) - 1;
+ return Register::from_code((code_ >> kBitsPerGpRegCode) & kCodeMask);
+ }
+
Register gp() const {
DCHECK(is_gp());
return Register::from_code(code_);
@@ -97,31 +143,46 @@ class LiftoffRegister {
return DoubleRegister::from_code(code_ - kAfterMaxLiftoffGpRegCode);
}
- int liftoff_code() const { return code_; }
+ uint32_t liftoff_code() const {
+ DCHECK(is_gp() || is_fp());
+ return code_;
+ }
RegClass reg_class() const {
- DCHECK(is_gp() || is_fp());
- return is_gp() ? kGpReg : kFpReg;
+ return is_pair() ? kGpRegPair : is_gp() ? kGpReg : kFpReg;
}
bool operator==(const LiftoffRegister other) const {
+ DCHECK_EQ(is_pair(), other.is_pair());
return code_ == other.code_;
}
bool operator!=(const LiftoffRegister other) const {
+ DCHECK_EQ(is_pair(), other.is_pair());
return code_ != other.code_;
}
+ bool overlaps(const LiftoffRegister other) const {
+ if (is_pair()) return low().overlaps(other) || high().overlaps(other);
+ if (other.is_pair()) return *this == other.low() || *this == other.high();
+ return *this == other;
+ }
private:
- uint8_t code_;
+ storage_t code_;
- explicit constexpr LiftoffRegister(uint8_t code) : code_(code) {}
+ explicit constexpr LiftoffRegister(storage_t code) : code_(code) {}
};
static_assert(IS_TRIVIALLY_COPYABLE(LiftoffRegister),
"LiftoffRegister can efficiently be passed by value");
inline std::ostream& operator<<(std::ostream& os, LiftoffRegister reg) {
- return reg.is_gp() ? os << "gp" << reg.gp().code()
- : os << "fp" << reg.fp().code();
+ if (reg.is_pair()) {
+ return os << "<gp" << reg.low_gp().code() << "+" << reg.high_gp().code()
+ << ">";
+ } else if (reg.is_gp()) {
+ return os << "gp" << reg.gp().code();
+ } else {
+ return os << "fp" << reg.fp().code();
+ }
}
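
  Editorial sketch (not part of the patch): on 32-bit targets an i64 value lives in two gp registers, and ForPair packs both codes plus a marker bit into one integer. A standalone illustration of the bit layout; the 4-bit code width is an assumption for the sketch, not a V8 constant:

  #include <cassert>
  #include <cstdint>

  constexpr int kBitsPerCode = 4;  // assumed width of one gp register code
  constexpr uint32_t kPairBit = 1u << (2 * kBitsPerCode);
  constexpr uint32_t kCodeMask = (1u << kBitsPerCode) - 1;

  // Packs two register codes the way LiftoffRegister::ForPair does.
  uint32_t EncodePair(uint32_t low, uint32_t high) {
    assert(low != high);
    return low | (high << kBitsPerCode) | kPairBit;
  }

  bool IsPair(uint32_t code) { return (code & kPairBit) != 0; }
  uint32_t LowCode(uint32_t code) { return code & kCodeMask; }
  uint32_t HighCode(uint32_t code) { return (code >> kBitsPerCode) & kCodeMask; }
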
class LiftoffRegList {
@@ -144,16 +205,30 @@ class LiftoffRegList {
}
LiftoffRegister set(LiftoffRegister reg) {
- regs_ |= storage_t{1} << reg.liftoff_code();
+ if (reg.is_pair()) {
+ regs_ |= storage_t{1} << reg.low().liftoff_code();
+ regs_ |= storage_t{1} << reg.high().liftoff_code();
+ } else {
+ regs_ |= storage_t{1} << reg.liftoff_code();
+ }
return reg;
}
LiftoffRegister clear(LiftoffRegister reg) {
- regs_ &= ~(storage_t{1} << reg.liftoff_code());
+ if (reg.is_pair()) {
+ regs_ &= ~(storage_t{1} << reg.low().liftoff_code());
+ regs_ &= ~(storage_t{1} << reg.high().liftoff_code());
+ } else {
+ regs_ &= ~(storage_t{1} << reg.liftoff_code());
+ }
return reg;
}
bool has(LiftoffRegister reg) const {
+ if (reg.is_pair()) {
+ DCHECK_EQ(has(reg.low()), has(reg.high()));
+ reg = reg.low();
+ }
return (regs_ & (storage_t{1} << reg.liftoff_code())) != 0;
}
@@ -211,7 +286,7 @@ class LiftoffRegList {
template <typename... Regs>
static LiftoffRegList ForRegs(Regs... regs) {
std::array<LiftoffRegister, sizeof...(regs)> regs_arr{
- LiftoffRegister(regs)...};
+ {LiftoffRegister(regs)...}};
LiftoffRegList list;
for (LiftoffRegister reg : regs_arr) list.set(reg);
return list;
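
  Editorial sketch (not part of the patch): with register pairs, a LiftoffRegList entry for an i64 simply sets the bits of both halves, so membership can be answered from either half. A tiny bitset illustration of that behaviour, not the real class:

  #include <cstdint>

  struct RegListSketch {
    uint64_t bits = 0;
    void Set(uint32_t code) { bits |= uint64_t{1} << code; }
    void SetPair(uint32_t low_code, uint32_t high_code) {
      Set(low_code);   // both halves are tracked individually,
      Set(high_code);  // exactly as LiftoffRegList::set does for pairs
    }
    bool Has(uint32_t code) const { return (bits >> code) & 1; }
  };
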
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index 50ab1e82c8..fda98aea62 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -2,180 +2,542 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_H_
+#ifndef V8_WASM_BASELINE_MIPS_LIFTOFF_ASSEMBLER_MIPS_H_
+#define V8_WASM_BASELINE_MIPS_LIFTOFF_ASSEMBLER_MIPS_H_
#include "src/wasm/baseline/liftoff-assembler.h"
+#define BAILOUT(reason) bailout("mips " reason)
+
namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
+namespace liftoff {
+
+// sp-8 holds the stack marker, sp-16 is the wasm context, and the first stack
+// slot is located at sp-24.
+constexpr int32_t kConstantStackSpace = 16;
+constexpr int32_t kFirstStackSlotOffset =
+ kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
+
+inline MemOperand GetStackSlot(uint32_t index) {
+ int32_t offset = index * LiftoffAssembler::kStackSlotSize;
+ return MemOperand(sp, -kFirstStackSlotOffset - offset);
+}
+
+inline MemOperand GetHalfStackSlot(uint32_t half_index) {
+ int32_t offset = half_index * (LiftoffAssembler::kStackSlotSize / 2);
+ return MemOperand(sp, -kFirstStackSlotOffset - offset);
+}
+
+inline MemOperand GetContextOperand() { return MemOperand(sp, -16); }
+
+} // namespace liftoff
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
- UNIMPLEMENTED();
+uint32_t LiftoffAssembler::PrepareStackFrame() {
+ uint32_t offset = static_cast<uint32_t>(pc_offset());
+ addiu(sp, sp, 0);
+ return offset;
+}
+
+void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+ uint32_t stack_slots) {
+ uint32_t bytes = liftoff::kConstantStackSpace + kStackSlotSize * stack_slots;
+ DCHECK_LE(bytes, kMaxInt);
+ // We can't run out of space; just pass anything big enough so that the
+ // assembler does not try to grow the buffer.
+ constexpr int kAvailableSpace = 64;
+ Assembler patching_assembler(isolate(), buffer_ + offset, kAvailableSpace);
+ patching_assembler.addiu(sp, sp, -bytes);
+}
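
  Editorial sketch (not part of the patch): PrepareStackFrame emits a placeholder {addiu sp, sp, 0} and records its pc offset; once the number of spill slots is known, PatchPrepareStackFrame assembles the real stack adjustment over it. The patch-later pattern over a plain byte buffer, where the 4-byte placeholder encoding is made up for the sketch and is not MIPS machine code:

  #include <cstdint>
  #include <cstring>
  #include <vector>

  // Reserve space for one 4-byte instruction and remember where it is.
  uint32_t PrepareFrame(std::vector<uint8_t>* buf) {
    uint32_t offset = static_cast<uint32_t>(buf->size());
    buf->insert(buf->end(), 4, 0);  // placeholder for "addiu sp, sp, 0"
    return offset;
  }

  // Patch the placeholder once the final frame size is known.
  void PatchFrame(std::vector<uint8_t>* buf, uint32_t offset,
                  uint32_t frame_bytes) {
    std::memcpy(buf->data() + offset, &frame_bytes, sizeof frame_bytes);
  }
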
+
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
+ switch (value.type()) {
+ case kWasmI32:
+ TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
+ break;
+ case kWasmI64: {
+ DCHECK(RelocInfo::IsNone(rmode));
+ int32_t low_word = value.to_i64();
+ int32_t high_word = value.to_i64() >> 32;
+ TurboAssembler::li(reg.low_gp(), Operand(low_word));
+ TurboAssembler::li(reg.high_gp(), Operand(high_word));
+ break;
+ }
+ case kWasmF32:
+ TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
+ break;
+ case kWasmF64:
+ TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {
- UNIMPLEMENTED();
+ DCHECK_LE(offset, kMaxInt);
+ lw(dst, liftoff::GetContextOperand());
+ DCHECK_EQ(4, size);
+ lw(dst, MemOperand(dst, offset));
}
-void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+void LiftoffAssembler::SpillContext(Register context) {
+ sw(context, liftoff::GetContextOperand());
+}
-void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+void LiftoffAssembler::FillContextInto(Register dst) {
+ lw(dst, liftoff::GetContextOperand());
+}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc) {
- UNIMPLEMENTED();
+ // TODO(ksreten): Add a check for unaligned memory accesses.
+ Register src = no_reg;
+ if (offset_reg != no_reg) {
+ src = GetUnusedRegister(kGpReg, pinned).gp();
+ emit_ptrsize_add(src, src_addr, offset_reg);
+ }
+ MemOperand src_op = (offset_reg != no_reg) ? MemOperand(src, offset_imm)
+ : MemOperand(src_addr, offset_imm);
+
+ if (protected_load_pc) *protected_load_pc = pc_offset();
+ switch (type.value()) {
+ case LoadType::kI32Load8U:
+ lbu(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load8U:
+ lbu(dst.low_gp(), src_op);
+ xor_(dst.high_gp(), dst.high_gp(), dst.high_gp());
+ break;
+ case LoadType::kI32Load8S:
+ lb(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load8S:
+ lb(dst.low_gp(), src_op);
+ TurboAssembler::Move(dst.high_gp(), dst.low_gp());
+ sra(dst.high_gp(), dst.high_gp(), 31);
+ break;
+ case LoadType::kI32Load16U:
+ TurboAssembler::Ulhu(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load16U:
+ TurboAssembler::Ulhu(dst.low_gp(), src_op);
+ xor_(dst.high_gp(), dst.high_gp(), dst.high_gp());
+ break;
+ case LoadType::kI32Load16S:
+ TurboAssembler::Ulh(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load16S:
+ TurboAssembler::Ulh(dst.low_gp(), src_op);
+ TurboAssembler::Move(dst.high_gp(), dst.low_gp());
+ sra(dst.high_gp(), dst.high_gp(), 31);
+ break;
+ case LoadType::kI32Load:
+ TurboAssembler::Ulw(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load32U:
+ TurboAssembler::Ulw(dst.low_gp(), src_op);
+ xor_(dst.high_gp(), dst.high_gp(), dst.high_gp());
+ break;
+ case LoadType::kI64Load32S:
+ TurboAssembler::Ulw(dst.low_gp(), src_op);
+ TurboAssembler::Move(dst.high_gp(), dst.low_gp());
+ sra(dst.high_gp(), dst.high_gp(), 31);
+ break;
+ case LoadType::kI64Load: {
+ MemOperand src_op_upper = (offset_reg != no_reg)
+ ? MemOperand(src, offset_imm + 4)
+ : MemOperand(src_addr, offset_imm + 4);
+ TurboAssembler::Ulw(dst.high_gp(), src_op_upper);
+ TurboAssembler::Ulw(dst.low_gp(), src_op);
+ break;
+ }
+ case LoadType::kF32Load:
+ TurboAssembler::Ulwc1(dst.fp(), src_op, t8);
+ break;
+ case LoadType::kF64Load:
+ TurboAssembler::Uldc1(dst.fp(), src_op, t8);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc) {
- UNIMPLEMENTED();
+ // TODO(ksreten): Add a check for unaligned memory accesses.
+ Register dst = no_reg;
+ if (offset_reg != no_reg) {
+ dst = GetUnusedRegister(kGpReg, pinned).gp();
+ emit_ptrsize_add(dst, dst_addr, offset_reg);
+ }
+ MemOperand dst_op = (offset_reg != no_reg) ? MemOperand(dst, offset_imm)
+ : MemOperand(dst_addr, offset_imm);
+
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ src = src.low();
+ V8_FALLTHROUGH;
+ case StoreType::kI32Store8:
+ sb(src.gp(), dst_op);
+ break;
+ case StoreType::kI64Store16:
+ src = src.low();
+ V8_FALLTHROUGH;
+ case StoreType::kI32Store16:
+ TurboAssembler::Ush(src.gp(), dst_op, t8);
+ break;
+ case StoreType::kI64Store32:
+ src = src.low();
+ V8_FALLTHROUGH;
+ case StoreType::kI32Store:
+ TurboAssembler::Usw(src.gp(), dst_op);
+ break;
+ case StoreType::kI64Store: {
+ MemOperand dst_op_upper = (offset_reg != no_reg)
+ ? MemOperand(dst, offset_imm + 4)
+ : MemOperand(dst_addr, offset_imm + 4);
+ TurboAssembler::Usw(src.high_gp(), dst_op_upper);
+ TurboAssembler::Usw(src.low_gp(), dst_op);
+ break;
+ }
+ case StoreType::kF32Store:
+ TurboAssembler::Uswc1(src.fp(), dst_op, t8);
+ break;
+ case StoreType::kF64Store:
+ TurboAssembler::Usdc1(src.fp(), dst_op, t8);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
- uint32_t caller_slot_idx) {
- UNIMPLEMENTED();
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ BAILOUT("LoadCallerFrameSlot");
+}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
+ ValueType type) {
+ DCHECK_NE(dst_index, src_index);
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
+ Fill(reg, src_index, type);
+ Spill(dst_index, reg, type);
}
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+ ValueType type) {
+ // TODO(wasm): Extract the destination register from the CallDescriptor.
+ // TODO(wasm): Add multi-return support.
+ LiftoffRegister dst =
+ reg.is_pair()
+ ? LiftoffRegister::ForPair(LiftoffRegister(v0), LiftoffRegister(v1))
+ : reg.is_gp() ? LiftoffRegister(v0) : LiftoffRegister(f2);
+ if (reg != dst) Move(dst, reg, type);
}
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+ DCHECK_NE(dst, src);
+ TurboAssembler::mov(dst, src);
}
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueType type) {
+ DCHECK_NE(dst, src);
+ TurboAssembler::Move(dst, src);
}
-void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
+ ValueType type) {
+ RecordUsedSpillSlot(index);
+ MemOperand dst = liftoff::GetStackSlot(index);
+ switch (type) {
+ case kWasmI32:
+ sw(reg.gp(), dst);
+ break;
+ case kWasmI64:
+ sw(reg.low_gp(), dst);
+ sw(reg.high_gp(), liftoff::GetHalfStackSlot(2 * index + 1));
+ break;
+ case kWasmF32:
+ swc1(reg.fp(), dst);
+ break;
+ case kWasmF64:
+ TurboAssembler::Sdc1(reg.fp(), dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
- UNIMPLEMENTED();
+ RecordUsedSpillSlot(index);
+ MemOperand dst = liftoff::GetStackSlot(index);
+ switch (value.type()) {
+ case kWasmI32: {
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg);
+ TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
+ sw(tmp.gp(), dst);
+ break;
+ }
+ case kWasmI64: {
+ LiftoffRegister low = GetUnusedRegister(kGpReg);
+ LiftoffRegister high = GetUnusedRegister(kGpReg);
+
+ int32_t low_word = value.to_i64();
+ int32_t high_word = value.to_i64() >> 32;
+ TurboAssembler::li(low.gp(), Operand(low_word));
+ TurboAssembler::li(high.gp(), Operand(high_word));
+
+ sw(low.gp(), dst);
+ sw(high.gp(), liftoff::GetHalfStackSlot(2 * index + 1));
+ break;
+ }
+ default:
+ // kWasmF32 and kWasmF64 are unreachable, since those
+ // constants are not tracked.
+ UNREACHABLE();
+ }
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
+ ValueType type) {
+ MemOperand src = liftoff::GetStackSlot(index);
+ switch (type) {
+ case kWasmI32:
+ lw(reg.gp(), src);
+ break;
+ case kWasmI64:
+ lw(reg.low_gp(), src);
+ lw(reg.high_gp(), liftoff::GetHalfStackSlot(2 * index + 1));
+ break;
+ case kWasmF32:
+ lwc1(reg.fp(), src);
+ break;
+ case kWasmF64:
+ TurboAssembler::Ldc1(reg.fp(), src);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
-#define UNIMPLEMENTED_GP_BINOP(name) \
- void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
- Register rhs) { \
- UNIMPLEMENTED(); \
+void LiftoffAssembler::FillI64Half(Register reg, uint32_t half_index) {
+ lw(reg, liftoff::GetHalfStackSlot(half_index));
+}
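
  Editorial sketch (not part of the patch): on MIPS32 an i64 stack slot is addressed as two 4-byte halves, so slot index i maps to half indices 2*i (low word) and 2*i+1 (high word), which is what GetHalfStackSlot computes. The offset arithmetic in isolation; the 8-byte slot size is an assumption standing in for LiftoffAssembler::kStackSlotSize:

  #include <cstdint>

  constexpr int32_t kStackSlotSize = 8;  // assumed slot size for the sketch
  constexpr int32_t kConstantStackSpace = 16;
  constexpr int32_t kFirstStackSlotOffset = kConstantStackSpace + kStackSlotSize;

  // Offset (relative to sp) of a full 8-byte slot, as liftoff::GetStackSlot
  // computes it.
  int32_t SlotOffset(uint32_t index) {
    return -kFirstStackSlotOffset - static_cast<int32_t>(index) * kStackSlotSize;
  }

  // Offset of one 4-byte half; an i64 in slot `index` uses half indices
  // 2 * index (low word) and 2 * index + 1 (high word).
  int32_t HalfSlotOffset(uint32_t half_index) {
    return -kFirstStackSlotOffset -
           static_cast<int32_t>(half_index) * (kStackSlotSize / 2);
  }
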
+
+void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
+ TurboAssembler::Mul(dst, lhs, rhs);
+}
+
+#define I32_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
+ Register rhs) { \
+ instruction(dst, lhs, rhs); \
}
-#define UNIMPLEMENTED_GP_UNOP(name) \
- bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
- UNIMPLEMENTED(); \
+
+// clang-format off
+I32_BINOP(add, addu)
+I32_BINOP(sub, subu)
+I32_BINOP(and, and_)
+I32_BINOP(or, or_)
+I32_BINOP(xor, xor_)
+// clang-format on
+
+#undef I32_BINOP
+
+void LiftoffAssembler::emit_ptrsize_add(Register dst, Register lhs,
+ Register rhs) {
+ emit_i32_add(dst, lhs, rhs);
+}
+
+bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
+ TurboAssembler::Clz(dst, src);
+ return true;
+}
+
+bool LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
+ TurboAssembler::Ctz(dst, src);
+ return true;
+}
+
+bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
+ TurboAssembler::Popcnt(dst, src);
+ return true;
+}
+
+#define I32_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name( \
+ Register dst, Register lhs, Register rhs, LiftoffRegList pinned) { \
+ instruction(dst, lhs, rhs); \
}
-#define UNIMPLEMENTED_FP_BINOP(name) \
+
+I32_SHIFTOP(shl, sllv)
+I32_SHIFTOP(sar, srav)
+I32_SHIFTOP(shr, srlv)
+
+#undef I32_SHIFTOP
+
+#define FP_BINOP(name, instruction) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
- UNIMPLEMENTED(); \
- }
-
-UNIMPLEMENTED_GP_BINOP(i32_add)
-UNIMPLEMENTED_GP_BINOP(i32_sub)
-UNIMPLEMENTED_GP_BINOP(i32_mul)
-UNIMPLEMENTED_GP_BINOP(i32_and)
-UNIMPLEMENTED_GP_BINOP(i32_or)
-UNIMPLEMENTED_GP_BINOP(i32_xor)
-UNIMPLEMENTED_GP_BINOP(i32_shl)
-UNIMPLEMENTED_GP_BINOP(i32_sar)
-UNIMPLEMENTED_GP_BINOP(i32_shr)
-UNIMPLEMENTED_GP_UNOP(i32_eqz)
-UNIMPLEMENTED_GP_UNOP(i32_clz)
-UNIMPLEMENTED_GP_UNOP(i32_ctz)
-UNIMPLEMENTED_GP_UNOP(i32_popcnt)
-UNIMPLEMENTED_GP_BINOP(ptrsize_add)
-UNIMPLEMENTED_FP_BINOP(f32_add)
-UNIMPLEMENTED_FP_BINOP(f32_sub)
-UNIMPLEMENTED_FP_BINOP(f32_mul)
-
-#undef UNIMPLEMENTED_GP_BINOP
-#undef UNIMPLEMENTED_GP_UNOP
+ instruction(dst, lhs, rhs); \
+ }
+#define UNIMPLEMENTED_FP_UNOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ BAILOUT("fp unop"); \
+ }
+
+FP_BINOP(f32_add, add_s)
+FP_BINOP(f32_sub, sub_s)
+FP_BINOP(f32_mul, mul_s)
+UNIMPLEMENTED_FP_UNOP(f32_neg)
+FP_BINOP(f64_add, add_d)
+FP_BINOP(f64_sub, sub_d)
+FP_BINOP(f64_mul, mul_d)
+UNIMPLEMENTED_FP_UNOP(f64_neg)
+
+#undef FP_BINOP
#undef UNIMPLEMENTED_FP_BINOP
-void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_jump(Label* label) {
+ TurboAssembler::Branch(label);
+}
-void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueType type, Register lhs,
+ Register rhs) {
+ if (rhs != no_reg) {
+ TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
+ } else {
+ TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
+ }
}
-void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
+ Label true_label;
+ if (dst != lhs) {
+ ori(dst, zero_reg, 0x1);
+ }
+
+ if (rhs != no_reg) {
+ TurboAssembler::Branch(&true_label, cond, lhs, Operand(rhs));
+ } else {
+ TurboAssembler::Branch(&true_label, cond, lhs, Operand(zero_reg));
+ }
+ // If the condition is not true, set the result to 0.
+ TurboAssembler::mov(dst, zero_reg);
+
+ if (dst != lhs) {
+ bind(&true_label);
+ } else {
+ Label end_label;
+ TurboAssembler::Branch(&end_label);
+ bind(&true_label);
+
+ ori(dst, zero_reg, 0x1);
+ bind(&end_label);
+ }
+}
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("emit_f32_set_cond");
}
-void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
-void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallTrapCallbackForTesting() {
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
+ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(isolate()), 0);
+}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- UNIMPLEMENTED();
+ BAILOUT("AssertUnreachable");
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
- uint32_t src_index) {
- UNIMPLEMENTED();
+ uint32_t src_index,
+ RegPairHalf half) {
+ BAILOUT("PushCallerFrameSlot");
}
-void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("PushCallerFrameSlot reg");
}
-void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
+ BAILOUT("PushRegisters");
+}
-void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
+ BAILOUT("PopRegisters");
+}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- UNIMPLEMENTED();
+ DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize); // 16 bit immediate
+ TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots * kPointerSize));
}
void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
- UNIMPLEMENTED();
+ BAILOUT("PrepareCCall");
}
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallRegParamAddr");
}
void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallStackParamAddr");
}
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("CallC");
}
-void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallNativeWasmCode(Address addr) {
+ BAILOUT("CallNativeWasmCode");
+}
void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- UNIMPLEMENTED();
+ BAILOUT("CallRuntime");
+}
+
+void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ BAILOUT("CallIndirect");
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- UNIMPLEMENTED();
+ BAILOUT("AllocateStackSlot");
}
-void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
+ BAILOUT("DeallocateStackSlot");
+}
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_H_
+#undef BAILOUT
+
+#endif // V8_WASM_BASELINE_MIPS_LIFTOFF_ASSEMBLER_MIPS_H_
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index fd63198e24..d215f4178c 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -2,180 +2,487 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_H_
+#ifndef V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_
+#define V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_
#include "src/wasm/baseline/liftoff-assembler.h"
+#define BAILOUT(reason) bailout("mips64 " reason)
+
namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
+namespace liftoff {
+
+// sp-8 holds the stack marker, sp-16 is the wasm context, and the first stack
+// slot is located at sp-24.
+constexpr int32_t kConstantStackSpace = 16;
+constexpr int32_t kFirstStackSlotOffset =
+ kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
+
+inline MemOperand GetStackSlot(uint32_t index) {
+ int32_t offset = index * LiftoffAssembler::kStackSlotSize;
+ return MemOperand(sp, -kFirstStackSlotOffset - offset);
+}
+
+inline MemOperand GetContextOperand() { return MemOperand(sp, -16); }
+
+} // namespace liftoff
+
+uint32_t LiftoffAssembler::PrepareStackFrame() {
+ uint32_t offset = static_cast<uint32_t>(pc_offset());
+ daddiu(sp, sp, 0);
+ return offset;
+}
+
+void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+ uint32_t stack_slots) {
+ uint32_t bytes = liftoff::kConstantStackSpace + kStackSlotSize * stack_slots;
+ DCHECK_LE(bytes, kMaxInt);
+ // We can't run out of space; just pass anything big enough so that the
+ // assembler does not try to grow the buffer.
+ constexpr int kAvailableSpace = 256;
+ Assembler patching_assembler(isolate(), buffer_ + offset, kAvailableSpace);
+ patching_assembler.daddiu(sp, sp, -bytes);
+}
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
- UNIMPLEMENTED();
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
+ switch (value.type()) {
+ case kWasmI32:
+ TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
+ break;
+ case kWasmI64:
+ TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode));
+ break;
+ case kWasmF32:
+ TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
+ break;
+ case kWasmF64:
+ TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {
- UNIMPLEMENTED();
+ DCHECK_LE(offset, kMaxInt);
+ ld(dst, liftoff::GetContextOperand());
+ DCHECK(size == 4 || size == 8);
+ if (size == 4) {
+ lw(dst, MemOperand(dst, offset));
+ } else {
+ ld(dst, MemOperand(dst, offset));
+ }
}
-void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+void LiftoffAssembler::SpillContext(Register context) {
+ sd(context, liftoff::GetContextOperand());
+}
-void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+void LiftoffAssembler::FillContextInto(Register dst) {
+ ld(dst, liftoff::GetContextOperand());
+}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc) {
- UNIMPLEMENTED();
+ // TODO(ksreten): Add a check for unaligned memory accesses.
+ MemOperand src_op(src_addr, offset_imm);
+ if (offset_reg != no_reg) {
+ Register src = GetUnusedRegister(kGpReg, pinned).gp();
+ emit_ptrsize_add(src, src_addr, offset_reg);
+ src_op = MemOperand(src, offset_imm);
+ }
+
+ if (protected_load_pc) *protected_load_pc = pc_offset();
+ switch (type.value()) {
+ case LoadType::kI32Load8U:
+ case LoadType::kI64Load8U:
+ lbu(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load8S:
+ case LoadType::kI64Load8S:
+ lb(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16U:
+ case LoadType::kI64Load16U:
+ TurboAssembler::Ulhu(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16S:
+ case LoadType::kI64Load16S:
+ TurboAssembler::Ulh(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load:
+ case LoadType::kI64Load32U:
+ TurboAssembler::Ulwu(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load32S:
+ TurboAssembler::Ulw(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load:
+ TurboAssembler::Uld(dst.gp(), src_op);
+ break;
+ case LoadType::kF32Load:
+ TurboAssembler::Ulwc1(dst.fp(), src_op, t8);
+ break;
+ case LoadType::kF64Load:
+ TurboAssembler::Uldc1(dst.fp(), src_op, t8);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc) {
- UNIMPLEMENTED();
+ // TODO(ksreten): Add a check for unaligned memory accesses.
+ Register dst = no_reg;
+ if (offset_reg != no_reg) {
+ dst = GetUnusedRegister(kGpReg, pinned).gp();
+ emit_ptrsize_add(dst, dst_addr, offset_reg);
+ }
+ MemOperand dst_op = (offset_reg != no_reg) ? MemOperand(dst, offset_imm)
+ : MemOperand(dst_addr, offset_imm);
+
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ case StoreType::kI64Store8:
+ sb(src.gp(), dst_op);
+ break;
+ case StoreType::kI32Store16:
+ case StoreType::kI64Store16:
+ TurboAssembler::Ush(src.gp(), dst_op, t8);
+ break;
+ case StoreType::kI32Store:
+ case StoreType::kI64Store32:
+ TurboAssembler::Usw(src.gp(), dst_op);
+ break;
+ case StoreType::kI64Store:
+ TurboAssembler::Usd(src.gp(), dst_op);
+ break;
+ case StoreType::kF32Store:
+ TurboAssembler::Uswc1(src.fp(), dst_op, t8);
+ break;
+ case StoreType::kF64Store:
+ TurboAssembler::Usdc1(src.fp(), dst_op, t8);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
- uint32_t caller_slot_idx) {
- UNIMPLEMENTED();
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ BAILOUT("LoadCallerFrameSlot");
+}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
+ ValueType type) {
+ DCHECK_NE(dst_index, src_index);
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
+ Fill(reg, src_index, type);
+ Spill(dst_index, reg, type);
}
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+ ValueType type) {
+ LiftoffRegister dst = reg.is_gp() ? LiftoffRegister(v0) : LiftoffRegister(f2);
+ if (reg != dst) Move(dst, reg, type);
}
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+ DCHECK_NE(dst, src);
+ // TODO(ksreten): Handle different sizes here.
+ TurboAssembler::Move(dst, src);
}
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueType type) {
+ DCHECK_NE(dst, src);
+ TurboAssembler::Move(dst, src);
}
-void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
+ ValueType type) {
+ RecordUsedSpillSlot(index);
+ MemOperand dst = liftoff::GetStackSlot(index);
+ switch (type) {
+ case kWasmI32:
+ sw(reg.gp(), dst);
+ break;
+ case kWasmI64:
+ sd(reg.gp(), dst);
+ break;
+ case kWasmF32:
+ swc1(reg.fp(), dst);
+ break;
+ case kWasmF64:
+ TurboAssembler::Sdc1(reg.fp(), dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
- UNIMPLEMENTED();
+ RecordUsedSpillSlot(index);
+ MemOperand dst = liftoff::GetStackSlot(index);
+ switch (value.type()) {
+ case kWasmI32: {
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg);
+ TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
+ sw(tmp.gp(), dst);
+ break;
+ }
+ case kWasmI64: {
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg);
+ TurboAssembler::li(tmp.gp(), value.to_i64());
+ sd(tmp.gp(), dst);
+ break;
+ }
+ default:
+ // kWasmF32 and kWasmF64 are unreachable, since those
+ // constants are not tracked.
+ UNREACHABLE();
+ }
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
+ ValueType type) {
+ MemOperand src = liftoff::GetStackSlot(index);
+ switch (type) {
+ case kWasmI32:
+ lw(reg.gp(), src);
+ break;
+ case kWasmI64:
+ ld(reg.gp(), src);
+ break;
+ case kWasmF32:
+ lwc1(reg.fp(), src);
+ break;
+ case kWasmF64:
+ TurboAssembler::Ldc1(reg.fp(), src);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
-#define UNIMPLEMENTED_GP_BINOP(name) \
- void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
- Register rhs) { \
- UNIMPLEMENTED(); \
+void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
+ UNREACHABLE();
+}
+
+void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
+ TurboAssembler::Mul(dst, lhs, rhs);
+}
+
+#define I32_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
+ Register rhs) { \
+ instruction(dst, lhs, rhs); \
}
-#define UNIMPLEMENTED_GP_UNOP(name) \
- bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
- UNIMPLEMENTED(); \
+
+// clang-format off
+I32_BINOP(add, addu)
+I32_BINOP(sub, subu)
+I32_BINOP(and, and_)
+I32_BINOP(or, or_)
+I32_BINOP(xor, xor_)
+// clang-format on
+
+#undef I32_BINOP
+
+void LiftoffAssembler::emit_ptrsize_add(Register dst, Register lhs,
+ Register rhs) {
+ TurboAssembler::Daddu(dst, lhs, rhs);
+}
+
+bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
+ TurboAssembler::Clz(dst, src);
+ return true;
+}
+
+bool LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
+ TurboAssembler::Ctz(dst, src);
+ return true;
+}
+
+bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
+ TurboAssembler::Popcnt(dst, src);
+ return true;
+}
+
+#define I32_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name( \
+ Register dst, Register lhs, Register rhs, LiftoffRegList pinned) { \
+ instruction(dst, lhs, rhs); \
}
-#define UNIMPLEMENTED_FP_BINOP(name) \
+
+I32_SHIFTOP(shl, sllv)
+I32_SHIFTOP(sar, srav)
+I32_SHIFTOP(shr, srlv)
+
+#undef I32_SHIFTOP
+
+#define FP_BINOP(name, instruction) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
- UNIMPLEMENTED(); \
- }
-
-UNIMPLEMENTED_GP_BINOP(i32_add)
-UNIMPLEMENTED_GP_BINOP(i32_sub)
-UNIMPLEMENTED_GP_BINOP(i32_mul)
-UNIMPLEMENTED_GP_BINOP(i32_and)
-UNIMPLEMENTED_GP_BINOP(i32_or)
-UNIMPLEMENTED_GP_BINOP(i32_xor)
-UNIMPLEMENTED_GP_BINOP(i32_shl)
-UNIMPLEMENTED_GP_BINOP(i32_sar)
-UNIMPLEMENTED_GP_BINOP(i32_shr)
-UNIMPLEMENTED_GP_UNOP(i32_eqz)
-UNIMPLEMENTED_GP_UNOP(i32_clz)
-UNIMPLEMENTED_GP_UNOP(i32_ctz)
-UNIMPLEMENTED_GP_UNOP(i32_popcnt)
-UNIMPLEMENTED_GP_BINOP(ptrsize_add)
-UNIMPLEMENTED_FP_BINOP(f32_add)
-UNIMPLEMENTED_FP_BINOP(f32_sub)
-UNIMPLEMENTED_FP_BINOP(f32_mul)
-
-#undef UNIMPLEMENTED_GP_BINOP
-#undef UNIMPLEMENTED_GP_UNOP
+ instruction(dst, lhs, rhs); \
+ }
+#define UNIMPLEMENTED_FP_UNOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ BAILOUT("fp unop"); \
+ }
+
+FP_BINOP(f32_add, add_s)
+FP_BINOP(f32_sub, sub_s)
+FP_BINOP(f32_mul, mul_s)
+UNIMPLEMENTED_FP_UNOP(f32_neg)
+FP_BINOP(f64_add, add_d)
+FP_BINOP(f64_sub, sub_d)
+FP_BINOP(f64_mul, mul_d)
+UNIMPLEMENTED_FP_UNOP(f64_neg)
+
+#undef FP_BINOP
#undef UNIMPLEMENTED_FP_BINOP
-void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_jump(Label* label) {
+ TurboAssembler::Branch(label);
+}
-void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueType type, Register lhs,
+ Register rhs) {
+ if (rhs != no_reg) {
+ TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
+ } else {
+ TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
+ }
}
-void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
+ Label true_label;
+ if (dst != lhs) {
+ ori(dst, zero_reg, 0x1);
+ }
+
+ if (rhs != no_reg) {
+ TurboAssembler::Branch(&true_label, cond, lhs, Operand(rhs));
+ } else {
+ TurboAssembler::Branch(&true_label, cond, lhs, Operand(zero_reg));
+ }
+ // If the condition is not true, set the result to 0.
+ TurboAssembler::mov(dst, zero_reg);
+
+ if (dst != lhs) {
+ bind(&true_label);
+ } else {
+ Label end_label;
+ TurboAssembler::Branch(&end_label);
+ bind(&true_label);
+
+ ori(dst, zero_reg, 0x1);
+ bind(&end_label);
+ }
+}
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("emit_f32_set_cond");
}
-void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
-void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallTrapCallbackForTesting() {
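+ // Prepare a C call with zero arguments, using an unused GP register as the
+ // scratch register.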
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
+ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(isolate()), 0);
+}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- UNIMPLEMENTED();
+ BAILOUT("AssertUnreachable");
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
- uint32_t src_index) {
- UNIMPLEMENTED();
+ uint32_t src_index,
+ RegPairHalf half) {
+ BAILOUT("PushCallerFrameSlot");
}
-void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("PushCallerFrameSlot reg");
}
-void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
+ BAILOUT("PushRegisters");
+}
-void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
+ BAILOUT("PopRegisters");
+}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- UNIMPLEMENTED();
+ DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize); // 16 bit immediate
+ TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots * kPointerSize));
}
void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
- UNIMPLEMENTED();
+ BAILOUT("PrepareCCall");
}
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallRegParamAddr");
}
void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallStackParamAddr");
}
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("CallC");
}
-void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallNativeWasmCode(Address addr) {
+ BAILOUT("CallNativeWasmCode");
+}
void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- UNIMPLEMENTED();
+ BAILOUT("CallRuntime");
+}
+
+void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ BAILOUT("CallIndirect");
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- UNIMPLEMENTED();
+ BAILOUT("AllocateStackSlot");
}
-void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
+ BAILOUT("DeallocateStackSlot");
+}
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_H_
+#undef BAILOUT
+
+#endif // V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index 2d62d88dec..efbb6896d6 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -2,86 +2,125 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_H_
+#ifndef V8_WASM_BASELINE_PPC_LIFTOFF_ASSEMBLER_PPC_H_
+#define V8_WASM_BASELINE_PPC_LIFTOFF_ASSEMBLER_PPC_H_
#include "src/wasm/baseline/liftoff-assembler.h"
+#define BAILOUT(reason) bailout("ppc " reason)
+
namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
+uint32_t LiftoffAssembler::PrepareStackFrame() {
+ BAILOUT("PrepareStackFrame");
+ return 0;
+}
+
+void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+ uint32_t stack_slots) {
+ BAILOUT("PatchPrepareStackFrame");
+}
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
- UNIMPLEMENTED();
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
+ BAILOUT("LoadConstant");
}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {
- UNIMPLEMENTED();
+ BAILOUT("LoadFromContext");
}
-void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+void LiftoffAssembler::SpillContext(Register context) {
+ BAILOUT("SpillContext");
+}
-void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+void LiftoffAssembler::FillContextInto(Register dst) {
+ BAILOUT("FillContextInto");
+}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc) {
- UNIMPLEMENTED();
+ BAILOUT("Load");
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc) {
- UNIMPLEMENTED();
+ BAILOUT("Store");
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
- uint32_t caller_slot_idx) {
- UNIMPLEMENTED();
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ BAILOUT("LoadCallerFrameSlot");
}
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
+ ValueType type) {
+ BAILOUT("MoveStackValue");
}
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("MoveToReturnRegister");
}
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+ BAILOUT("Move Register");
}
-void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueType type) {
+ BAILOUT("Move DoubleRegister");
+}
+
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("Spill register");
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
- UNIMPLEMENTED();
+ BAILOUT("Spill value");
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
+ ValueType type) {
+ BAILOUT("Fill");
+}
+
+void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
+ BAILOUT("FillI64Half");
}
#define UNIMPLEMENTED_GP_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
- UNIMPLEMENTED(); \
+ BAILOUT("gp binop"); \
}
#define UNIMPLEMENTED_GP_UNOP(name) \
bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
- UNIMPLEMENTED(); \
+ BAILOUT("gp unop"); \
+ return true; \
}
#define UNIMPLEMENTED_FP_BINOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
- UNIMPLEMENTED(); \
+ BAILOUT("fp binop"); \
+ }
+#define UNIMPLEMENTED_FP_UNOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ BAILOUT("fp unop"); \
+ }
+#define UNIMPLEMENTED_SHIFTOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register lhs, Register rhs, \
+ LiftoffRegList pinned) { \
+ BAILOUT("shiftop"); \
}
UNIMPLEMENTED_GP_BINOP(i32_add)
@@ -90,10 +129,9 @@ UNIMPLEMENTED_GP_BINOP(i32_mul)
UNIMPLEMENTED_GP_BINOP(i32_and)
UNIMPLEMENTED_GP_BINOP(i32_or)
UNIMPLEMENTED_GP_BINOP(i32_xor)
-UNIMPLEMENTED_GP_BINOP(i32_shl)
-UNIMPLEMENTED_GP_BINOP(i32_sar)
-UNIMPLEMENTED_GP_BINOP(i32_shr)
-UNIMPLEMENTED_GP_UNOP(i32_eqz)
+UNIMPLEMENTED_SHIFTOP(i32_shl)
+UNIMPLEMENTED_SHIFTOP(i32_sar)
+UNIMPLEMENTED_SHIFTOP(i32_shr)
UNIMPLEMENTED_GP_UNOP(i32_clz)
UNIMPLEMENTED_GP_UNOP(i32_ctz)
UNIMPLEMENTED_GP_UNOP(i32_popcnt)
@@ -101,81 +139,115 @@ UNIMPLEMENTED_GP_BINOP(ptrsize_add)
UNIMPLEMENTED_FP_BINOP(f32_add)
UNIMPLEMENTED_FP_BINOP(f32_sub)
UNIMPLEMENTED_FP_BINOP(f32_mul)
+UNIMPLEMENTED_FP_UNOP(f32_neg)
+UNIMPLEMENTED_FP_BINOP(f64_add)
+UNIMPLEMENTED_FP_BINOP(f64_sub)
+UNIMPLEMENTED_FP_BINOP(f64_mul)
+UNIMPLEMENTED_FP_UNOP(f64_neg)
#undef UNIMPLEMENTED_GP_BINOP
#undef UNIMPLEMENTED_GP_UNOP
#undef UNIMPLEMENTED_FP_BINOP
+#undef UNIMPLEMENTED_FP_UNOP
+#undef UNIMPLEMENTED_SHIFTOP
-void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
-void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueType type, Register lhs,
+ Register rhs) {
+ BAILOUT("emit_cond_jump");
}
-void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
+ BAILOUT("emit_i32_set_cond");
+}
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("emit_f32_set_cond");
}
-void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
-void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallTrapCallbackForTesting() {
+ BAILOUT("CallTrapCallbackForTesting");
+}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- UNIMPLEMENTED();
+ BAILOUT("AssertUnreachable");
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
- uint32_t src_index) {
- UNIMPLEMENTED();
+ uint32_t src_index,
+ RegPairHalf half) {
+ BAILOUT("PushCallerFrameSlot");
}
-void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("PushCallerFrameSlot reg");
}
-void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
+ BAILOUT("PushRegisters");
+}
-void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
+ BAILOUT("PopRegisters");
+}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- UNIMPLEMENTED();
+ BAILOUT("DropStackSlotsAndRet");
}
void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
- UNIMPLEMENTED();
+ BAILOUT("PrepareCCall");
}
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallRegParamAddr");
}
void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallStackParamAddr");
}
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("CallC");
}
-void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallNativeWasmCode(Address addr) {
+ BAILOUT("CallNativeWasmCode");
+}
void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- UNIMPLEMENTED();
+ BAILOUT("CallRuntime");
+}
+
+void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ BAILOUT("CallIndirect");
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- UNIMPLEMENTED();
+ BAILOUT("AllocateStackSlot");
}
-void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
+ BAILOUT("DeallocateStackSlot");
+}
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_H_
+#undef BAILOUT
+
+#endif // V8_WASM_BASELINE_PPC_LIFTOFF_ASSEMBLER_PPC_H_
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index eebb8e4720..62145fadca 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -2,86 +2,125 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_S390_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_S390_H_
+#ifndef V8_WASM_BASELINE_S390_LIFTOFF_ASSEMBLER_S390_H_
+#define V8_WASM_BASELINE_S390_LIFTOFF_ASSEMBLER_S390_H_
#include "src/wasm/baseline/liftoff-assembler.h"
+#define BAILOUT(reason) bailout("s390 " reason)
+
namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
+uint32_t LiftoffAssembler::PrepareStackFrame() {
+ BAILOUT("PrepareStackFrame");
+ return 0;
+}
+
+void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+ uint32_t stack_slots) {
+ BAILOUT("PatchPrepareStackFrame");
+}
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
- UNIMPLEMENTED();
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
+ BAILOUT("LoadConstant");
}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {
- UNIMPLEMENTED();
+ BAILOUT("LoadFromContext");
}
-void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+void LiftoffAssembler::SpillContext(Register context) {
+ BAILOUT("SpillContext");
+}
-void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+void LiftoffAssembler::FillContextInto(Register dst) {
+ BAILOUT("FillContextInto");
+}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc) {
- UNIMPLEMENTED();
+ BAILOUT("Load");
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc) {
- UNIMPLEMENTED();
+ BAILOUT("Store");
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
- uint32_t caller_slot_idx) {
- UNIMPLEMENTED();
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ BAILOUT("LoadCallerFrameSlot");
}
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
+ ValueType type) {
+ BAILOUT("MoveStackValue");
}
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("MoveToReturnRegister");
}
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+ BAILOUT("Move Register");
}
-void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueType type) {
+ BAILOUT("Move DoubleRegister");
+}
+
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("Spill register");
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
- UNIMPLEMENTED();
+ BAILOUT("Spill value");
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
+ ValueType type) {
+ BAILOUT("Fill");
+}
+
+void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
+ BAILOUT("FillI64Half");
}
#define UNIMPLEMENTED_GP_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
- UNIMPLEMENTED(); \
+ BAILOUT("gp binop"); \
}
#define UNIMPLEMENTED_GP_UNOP(name) \
bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
- UNIMPLEMENTED(); \
+ BAILOUT("gp unop"); \
+ return true; \
}
#define UNIMPLEMENTED_FP_BINOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
- UNIMPLEMENTED(); \
+ BAILOUT("fp binop"); \
+ }
+#define UNIMPLEMENTED_FP_UNOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ BAILOUT("fp unop"); \
+ }
+#define UNIMPLEMENTED_SHIFTOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register lhs, Register rhs, \
+ LiftoffRegList pinned) { \
+ BAILOUT("shiftop"); \
}
UNIMPLEMENTED_GP_BINOP(i32_add)
@@ -90,10 +129,9 @@ UNIMPLEMENTED_GP_BINOP(i32_mul)
UNIMPLEMENTED_GP_BINOP(i32_and)
UNIMPLEMENTED_GP_BINOP(i32_or)
UNIMPLEMENTED_GP_BINOP(i32_xor)
-UNIMPLEMENTED_GP_BINOP(i32_shl)
-UNIMPLEMENTED_GP_BINOP(i32_sar)
-UNIMPLEMENTED_GP_BINOP(i32_shr)
-UNIMPLEMENTED_GP_UNOP(i32_eqz)
+UNIMPLEMENTED_SHIFTOP(i32_shl)
+UNIMPLEMENTED_SHIFTOP(i32_sar)
+UNIMPLEMENTED_SHIFTOP(i32_shr)
UNIMPLEMENTED_GP_UNOP(i32_clz)
UNIMPLEMENTED_GP_UNOP(i32_ctz)
UNIMPLEMENTED_GP_UNOP(i32_popcnt)
@@ -101,81 +139,115 @@ UNIMPLEMENTED_GP_BINOP(ptrsize_add)
UNIMPLEMENTED_FP_BINOP(f32_add)
UNIMPLEMENTED_FP_BINOP(f32_sub)
UNIMPLEMENTED_FP_BINOP(f32_mul)
+UNIMPLEMENTED_FP_UNOP(f32_neg)
+UNIMPLEMENTED_FP_BINOP(f64_add)
+UNIMPLEMENTED_FP_BINOP(f64_sub)
+UNIMPLEMENTED_FP_BINOP(f64_mul)
+UNIMPLEMENTED_FP_UNOP(f64_neg)
#undef UNIMPLEMENTED_GP_BINOP
#undef UNIMPLEMENTED_GP_UNOP
#undef UNIMPLEMENTED_FP_BINOP
+#undef UNIMPLEMENTED_FP_UNOP
+#undef UNIMPLEMENTED_SHIFTOP
-void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
-void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueType type, Register lhs,
+ Register rhs) {
+ BAILOUT("emit_cond_jump");
}
-void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
+ BAILOUT("emit_i32_set_cond");
+}
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("emit_f32_set_cond");
}
-void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
-void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallTrapCallbackForTesting() {
+ BAILOUT("CallTrapCallbackForTesting");
+}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- UNIMPLEMENTED();
+ BAILOUT("AssertUnreachable");
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
- uint32_t src_index) {
- UNIMPLEMENTED();
+ uint32_t src_index,
+ RegPairHalf half) {
+ BAILOUT("PushCallerFrameSlot");
}
-void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("PushCallerFrameSlot reg");
}
-void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
+ BAILOUT("PushRegisters");
+}
-void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
+ BAILOUT("PopRegisters");
+}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- UNIMPLEMENTED();
+ BAILOUT("DropStackSlotsAndRet");
}
void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
- UNIMPLEMENTED();
+ BAILOUT("PrepareCCall");
}
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallRegParamAddr");
}
void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallStackParamAddr");
}
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("CallC");
}
-void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallNativeWasmCode(Address addr) {
+ BAILOUT("CallNativeWasmCode");
+}
void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- UNIMPLEMENTED();
+ BAILOUT("CallRuntime");
+}
+
+void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ BAILOUT("CallIndirect");
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- UNIMPLEMENTED();
+ BAILOUT("AllocateStackSlot");
}
-void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
+ BAILOUT("DeallocateStackSlot");
+}
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_S390_H_
+#undef BAILOUT
+
+#endif // V8_WASM_BASELINE_S390_LIFTOFF_ASSEMBLER_S390_H_
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index 2b3b750fc4..c1f316072d 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_H_
+#ifndef V8_WASM_BASELINE_X64_LIFTOFF_ASSEMBLER_X64_H_
+#define V8_WASM_BASELINE_X64_LIFTOFF_ASSEMBLER_X64_H_
#include "src/wasm/baseline/liftoff-assembler.h"
@@ -16,12 +16,20 @@ namespace wasm {
namespace liftoff {
+// rbp-8 holds the stack marker, rbp-16 is the wasm context, first stack slot
+// is located at rbp-24.
+constexpr int32_t kConstantStackSpace = 16;
+constexpr int32_t kFirstStackSlotOffset =
+ kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
+
inline Operand GetStackSlot(uint32_t index) {
- // rbp-8 holds the stack marker, rbp-16 is the wasm context, first stack slot
- // is located at rbp-24.
- constexpr int32_t kFirstStackSlotOffset = -24;
- return Operand(
- rbp, kFirstStackSlotOffset - index * LiftoffAssembler::kStackSlotSize);
+ int32_t offset = index * LiftoffAssembler::kStackSlotSize;
+ return Operand(rbp, -kFirstStackSlotOffset - offset);
+}
+
+inline Operand GetHalfStackSlot(uint32_t half_index) {
+ int32_t offset = half_index * (LiftoffAssembler::kStackSlotSize / 2);
+ return Operand(rbp, -kFirstStackSlotOffset - offset);
}
// TODO(clemensh): Make this a constexpr variable once Operand is constexpr.
@@ -31,25 +39,58 @@ inline Operand GetContextOperand() { return Operand(rbp, -16); }
// stack for a call to C.
static constexpr Register kCCallLastArgAddrReg = rax;
+inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
+ uint32_t offset_imm, LiftoffRegList pinned) {
+ // Wasm memory is limited to a size <2GB, so all offsets can be encoded as an
+ // immediate value (in 31 bits, interpreted as a signed value).
+ // If the offset is bigger, we always trap and this code is not reached.
+ DCHECK(is_uint31(offset_imm));
+ if (offset == no_reg) return Operand(addr, offset_imm);
+ return Operand(addr, offset, times_1, offset_imm);
+}
+
} // namespace liftoff
-void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) {
+uint32_t LiftoffAssembler::PrepareStackFrame() {
+ uint32_t offset = static_cast<uint32_t>(pc_offset());
+ sub_sp_32(0);
+ return offset;
+}
+
+void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+ uint32_t stack_slots) {
+ uint32_t bytes = liftoff::kConstantStackSpace + kStackSlotSize * stack_slots;
DCHECK_LE(bytes, kMaxInt);
- subp(rsp, Immediate(bytes));
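+ // Patch the sub_sp_32(0) placeholder emitted by PrepareStackFrame() at
+ // {offset}, now that the required frame size is known.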
+ // We can't run out of space here, so just pass a value big enough to keep the
+ // assembler from trying to grow the buffer.
+ constexpr int kAvailableSpace = 64;
+ Assembler patching_assembler(isolate(), buffer_ + offset, kAvailableSpace);
+ patching_assembler.sub_sp_32(bytes);
}
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
switch (value.type()) {
case kWasmI32:
- if (value.to_i32() == 0) {
+ if (value.to_i32() == 0 && RelocInfo::IsNone(rmode)) {
xorl(reg.gp(), reg.gp());
} else {
- movl(reg.gp(), Immediate(value.to_i32()));
+ movl(reg.gp(), Immediate(value.to_i32(), rmode));
+ }
+ break;
+ case kWasmI64:
+ if (RelocInfo::IsNone(rmode)) {
+ TurboAssembler::Set(reg.gp(), value.to_i64());
+ } else {
+ movq(reg.gp(), value.to_i64(), rmode);
}
break;
case kWasmF32:
TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
break;
+ case kWasmF64:
+ TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
+ break;
default:
UNREACHABLE();
}
@@ -79,42 +120,46 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc) {
- Operand src_op = offset_reg == no_reg
- ? Operand(src_addr, offset_imm)
- : Operand(src_addr, offset_reg, times_1, offset_imm);
- if (offset_imm > kMaxInt) {
- // The immediate can not be encoded in the operand. Load it to a register
- // first.
- Register src = GetUnusedRegister(kGpReg, pinned).gp();
- movl(src, Immediate(offset_imm));
- if (offset_reg != no_reg) {
- emit_ptrsize_add(src, src, offset_reg);
- }
- src_op = Operand(src_addr, src, times_1, 0);
- }
+ Operand src_op =
+ liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm, pinned);
if (protected_load_pc) *protected_load_pc = pc_offset();
switch (type.value()) {
case LoadType::kI32Load8U:
+ case LoadType::kI64Load8U:
movzxbl(dst.gp(), src_op);
break;
case LoadType::kI32Load8S:
movsxbl(dst.gp(), src_op);
break;
+ case LoadType::kI64Load8S:
+ movsxbq(dst.gp(), src_op);
+ break;
case LoadType::kI32Load16U:
+ case LoadType::kI64Load16U:
movzxwl(dst.gp(), src_op);
break;
case LoadType::kI32Load16S:
movsxwl(dst.gp(), src_op);
break;
+ case LoadType::kI64Load16S:
+ movsxwq(dst.gp(), src_op);
+ break;
case LoadType::kI32Load:
+ case LoadType::kI64Load32U:
movl(dst.gp(), src_op);
break;
+ case LoadType::kI64Load32S:
+ movsxlq(dst.gp(), src_op);
+ break;
case LoadType::kI64Load:
movq(dst.gp(), src_op);
break;
case LoadType::kF32Load:
Movss(dst.fp(), src_op);
break;
+ case LoadType::kF64Load:
+ Movsd(dst.fp(), src_op);
+ break;
default:
UNREACHABLE();
}
@@ -124,28 +169,20 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc) {
- Operand dst_op = offset_reg == no_reg
- ? Operand(dst_addr, offset_imm)
- : Operand(dst_addr, offset_reg, times_1, offset_imm);
- if (offset_imm > kMaxInt) {
- // The immediate can not be encoded in the operand. Load it to a register
- // first.
- Register dst = GetUnusedRegister(kGpReg, pinned).gp();
- movl(dst, Immediate(offset_imm));
- if (offset_reg != no_reg) {
- emit_ptrsize_add(dst, dst, offset_reg);
- }
- dst_op = Operand(dst_addr, dst, times_1, 0);
- }
+ Operand dst_op =
+ liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm, pinned);
if (protected_store_pc) *protected_store_pc = pc_offset();
switch (type.value()) {
case StoreType::kI32Store8:
+ case StoreType::kI64Store8:
movb(dst_op, src.gp());
break;
case StoreType::kI32Store16:
+ case StoreType::kI64Store16:
movw(dst_op, src.gp());
break;
case StoreType::kI32Store:
+ case StoreType::kI64Store32:
movl(dst_op, src.gp());
break;
case StoreType::kI64Store:
@@ -154,72 +191,118 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
case StoreType::kF32Store:
Movss(dst_op, src.fp());
break;
+ case StoreType::kF64Store:
+ Movsd(dst_op, src.fp());
+ break;
default:
UNREACHABLE();
}
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
- uint32_t caller_slot_idx) {
+ uint32_t caller_slot_idx,
+ ValueType type) {
Operand src(rbp, kPointerSize * (caller_slot_idx + 1));
- // TODO(clemensh): Handle different sizes here.
- if (dst.is_gp()) {
- movq(dst.gp(), src);
- } else {
- Movsd(dst.fp(), src);
+ switch (type) {
+ case kWasmI32:
+ movl(dst.gp(), src);
+ break;
+ case kWasmI64:
+ movq(dst.gp(), src);
+ break;
+ case kWasmF32:
+ Movss(dst.fp(), src);
+ break;
+ case kWasmF64:
+ Movsd(dst.fp(), src);
+ break;
+ default:
+ UNREACHABLE();
}
}
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
+ ValueType type) {
DCHECK_NE(dst_index, src_index);
if (cache_state_.has_unused_register(kGpReg)) {
LiftoffRegister reg = GetUnusedRegister(kGpReg);
- Fill(reg, src_index);
- Spill(dst_index, reg);
+ Fill(reg, src_index, type);
+ Spill(dst_index, reg, type);
} else {
pushq(liftoff::GetStackSlot(src_index));
popq(liftoff::GetStackSlot(dst_index));
}
}
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+ ValueType type) {
// TODO(wasm): Extract the destination register from the CallDescriptor.
// TODO(wasm): Add multi-return support.
LiftoffRegister dst =
reg.is_gp() ? LiftoffRegister(rax) : LiftoffRegister(xmm1);
- if (reg != dst) Move(dst, reg);
+ if (reg != dst) Move(dst, reg, type);
}
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
- // The caller should check that the registers are not equal. For most
- // occurences, this is already guaranteed, so no need to check within this
- // method.
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
DCHECK_NE(dst, src);
- DCHECK_EQ(dst.reg_class(), src.reg_class());
- // TODO(clemensh): Handle different sizes here.
- if (dst.is_gp()) {
- movq(dst.gp(), src.gp());
+ if (type == kWasmI32) {
+ movl(dst, src);
} else {
- Movsd(dst.fp(), src.fp());
+ DCHECK_EQ(kWasmI64, type);
+ movq(dst, src);
}
}
-void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
- Operand dst = liftoff::GetStackSlot(index);
- // TODO(clemensh): Handle different sizes here.
- if (reg.is_gp()) {
- movq(dst, reg.gp());
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueType type) {
+ DCHECK_NE(dst, src);
+ if (type == kWasmF32) {
+ Movss(dst, src);
} else {
- Movsd(dst, reg.fp());
+ DCHECK_EQ(kWasmF64, type);
+ Movsd(dst, src);
+ }
+}
+
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
+ ValueType type) {
+ RecordUsedSpillSlot(index);
+ Operand dst = liftoff::GetStackSlot(index);
+ switch (type) {
+ case kWasmI32:
+ movl(dst, reg.gp());
+ break;
+ case kWasmI64:
+ movq(dst, reg.gp());
+ break;
+ case kWasmF32:
+ Movss(dst, reg.fp());
+ break;
+ case kWasmF64:
+ Movsd(dst, reg.fp());
+ break;
+ default:
+ UNREACHABLE();
}
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
+ RecordUsedSpillSlot(index);
Operand dst = liftoff::GetStackSlot(index);
switch (value.type()) {
case kWasmI32:
movl(dst, Immediate(value.to_i32()));
break;
+ case kWasmI64: {
+ // We could use movq, but this would require a temporary register. For
+ // simplicity (and to avoid potentially having to spill another register),
+ // we use two movl instructions.
+ int32_t low_word = static_cast<int32_t>(value.to_i64());
+ int32_t high_word = static_cast<int32_t>(value.to_i64() >> 32);
+ movl(dst, Immediate(low_word));
+ movl(liftoff::GetHalfStackSlot(2 * index + 1), Immediate(high_word));
+ break;
+ }
case kWasmF32:
movl(dst, Immediate(value.to_f32_boxed().get_bits()));
break;
@@ -228,16 +311,31 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
}
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
+ ValueType type) {
Operand src = liftoff::GetStackSlot(index);
- // TODO(clemensh): Handle different sizes here.
- if (reg.is_gp()) {
- movq(reg.gp(), src);
- } else {
- Movsd(reg.fp(), src);
+ switch (type) {
+ case kWasmI32:
+ movl(reg.gp(), src);
+ break;
+ case kWasmI64:
+ movq(reg.gp(), src);
+ break;
+ case kWasmF32:
+ Movss(reg.fp(), src);
+ break;
+ case kWasmF64:
+ Movsd(reg.fp(), src);
+ break;
+ default:
+ UNREACHABLE();
}
}
+void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
+ UNREACHABLE();
+}
+
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
if (lhs != dst) {
leal(dst, Operand(lhs, rhs, times_1, 0));
@@ -279,7 +377,8 @@ COMMUTATIVE_I32_BINOP(xor, xor)
namespace liftoff {
inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
Register lhs, Register rhs,
- void (Assembler::*emit_shift)(Register)) {
+ void (Assembler::*emit_shift)(Register),
+ LiftoffRegList pinned) {
// If dst is rcx, compute into the scratch register first, then move to rcx.
if (dst == rcx) {
assm->movl(kScratchRegister, lhs);
@@ -293,9 +392,10 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
// register. If lhs is rcx, lhs is now the scratch register.
bool use_scratch = false;
if (rhs != rcx) {
- use_scratch =
- lhs == rcx || assm->cache_state()->is_used(LiftoffRegister(rcx));
- if (use_scratch) assm->movl(kScratchRegister, rcx);
+ use_scratch = lhs == rcx ||
+ assm->cache_state()->is_used(LiftoffRegister(rcx)) ||
+ pinned.has(LiftoffRegister(rcx));
+ if (use_scratch) assm->movq(kScratchRegister, rcx);
if (lhs == rcx) lhs = kScratchRegister;
assm->movl(rcx, rhs);
}
@@ -305,27 +405,23 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
(assm->*emit_shift)(dst);
// Restore rcx if needed.
- if (use_scratch) assm->movl(rcx, kScratchRegister);
+ if (use_scratch) assm->movq(rcx, kScratchRegister);
}
} // namespace liftoff
-void LiftoffAssembler::emit_i32_shl(Register dst, Register lhs, Register rhs) {
- liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shll_cl);
+void LiftoffAssembler::emit_i32_shl(Register dst, Register lhs, Register rhs,
+ LiftoffRegList pinned) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shll_cl, pinned);
}
-void LiftoffAssembler::emit_i32_sar(Register dst, Register lhs, Register rhs) {
- liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::sarl_cl);
+void LiftoffAssembler::emit_i32_sar(Register dst, Register lhs, Register rhs,
+ LiftoffRegList pinned) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::sarl_cl, pinned);
}
-void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, Register rhs) {
- liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shrl_cl);
-}
-
-bool LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
- testl(src, src);
- setcc(zero, dst);
- movzxbl(dst, dst);
- return true;
+void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, Register rhs,
+ LiftoffRegList pinned) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shrl_cl, pinned);
}
bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
@@ -419,18 +515,128 @@ void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
}
}
-void LiftoffAssembler::emit_i32_test(Register reg) { testl(reg, reg); }
+void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
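+ // f32.neg only flips the sign bit, so XOR the value with a mask that has only
+ // bit 31 set.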
+ static constexpr uint32_t kSignBit = uint32_t{1} << 31;
+ if (dst == src) {
+ TurboAssembler::Move(kScratchDoubleReg, kSignBit);
+ Xorps(dst, kScratchDoubleReg);
+ } else {
+ TurboAssembler::Move(dst, kSignBit);
+ Xorps(dst, src);
+ }
+}
+
+void LiftoffAssembler::emit_f64_add(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vaddsd(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ addsd(dst, lhs);
+ } else {
+ if (dst != lhs) movsd(dst, lhs);
+ addsd(dst, rhs);
+ }
+}
+
+void LiftoffAssembler::emit_f64_sub(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vsubsd(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ movsd(kScratchDoubleReg, rhs);
+ movsd(dst, lhs);
+ subsd(dst, kScratchDoubleReg);
+ } else {
+ if (dst != lhs) movsd(dst, lhs);
+ subsd(dst, rhs);
+ }
+}
+
+void LiftoffAssembler::emit_f64_mul(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmulsd(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ mulsd(dst, lhs);
+ } else {
+ if (dst != lhs) movsd(dst, lhs);
+ mulsd(dst, rhs);
+ }
+}
-void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
- cmpl(lhs, rhs);
+void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
+ static constexpr uint64_t kSignBit = uint64_t{1} << 63;
+ if (dst == src) {
+ TurboAssembler::Move(kScratchDoubleReg, kSignBit);
+ Xorpd(dst, kScratchDoubleReg);
+ } else {
+ TurboAssembler::Move(dst, kSignBit);
+ Xorpd(dst, src);
+ }
}
void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueType type, Register lhs,
+ Register rhs) {
+ if (rhs != no_reg) {
+ switch (type) {
+ case kWasmI32:
+ cmpl(lhs, rhs);
+ break;
+ case kWasmI64:
+ cmpq(lhs, rhs);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ DCHECK_EQ(type, kWasmI32);
+ testl(lhs, lhs);
+ }
+
j(cond, label);
}
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
+ if (rhs != no_reg) {
+ cmpl(lhs, rhs);
+ } else {
+ testl(lhs, lhs);
+ }
+
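+ // Materialize the flag as 0 or 1: setcc writes a single byte, movzxbl
+ // zero-extends it to 32 bits.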
+ setcc(cond, dst);
+ movzxbl(dst, dst);
+}
+
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Label cont;
+ Label not_nan;
+
+ Ucomiss(lhs, rhs);
+ // If PF is one, one of the operands was NaN. This needs special handling.
+ j(parity_odd, &not_nan, Label::kNear);
+ // Return 1 for f32.ne, 0 for all other cases.
+ if (cond == not_equal) {
+ movl(dst, Immediate(1));
+ } else {
+ xorl(dst, dst);
+ }
+ jmp(&cont, Label::kNear);
+ bind(&not_nan);
+
+ setcc(cond, dst);
+ movzxbl(dst, dst);
+ bind(&cont);
+}
+
void LiftoffAssembler::StackCheck(Label* ool_code) {
Register limit = GetUnusedRegister(kGpReg).gp();
LoadAddress(limit, ExternalReference::address_of_stack_limit(isolate()));
@@ -449,26 +655,37 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
- uint32_t src_index) {
+ uint32_t src_index, RegPairHalf) {
switch (src.loc()) {
case VarState::kStack:
pushq(liftoff::GetStackSlot(src_index));
break;
case VarState::kRegister:
- PushCallerFrameSlot(src.reg());
+ PushCallerFrameSlot(src.reg(), src.type());
break;
- case VarState::kI32Const:
+ case VarState::KIntConst:
pushq(Immediate(src.i32_const()));
break;
}
}
-void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
- if (reg.is_gp()) {
- pushq(reg.gp());
- } else {
- subp(rsp, Immediate(kPointerSize));
- Movsd(Operand(rsp, 0), reg.fp());
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
+ ValueType type) {
+ switch (type) {
+ case kWasmI32:
+ case kWasmI64:
+ pushq(reg.gp());
+ break;
+ case kWasmF32:
+ subp(rsp, Immediate(kPointerSize));
+ Movss(Operand(rsp, 0), reg.fp());
+ break;
+ case kWasmF64:
+ subp(rsp, Immediate(kPointerSize));
+ Movsd(Operand(rsp, 0), reg.fp());
+ break;
+ default:
+ UNREACHABLE();
}
}
@@ -552,6 +769,16 @@ void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
CallRuntimeDelayed(zone, fid);
}
+void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
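+ // If no target register is given, the call target sits on top of the stack;
+ // pop it into the scratch register before calling.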
+ if (target == no_reg) {
+ popq(kScratchRegister);
+ target = kScratchRegister;
+ }
+ call(target);
+}
+
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
subp(rsp, Immediate(size));
movp(addr, rsp);
@@ -565,4 +792,4 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
} // namespace internal
} // namespace v8
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_H_
+#endif // V8_WASM_BASELINE_X64_LIFTOFF_ASSEMBLER_X64_H_
diff --git a/deps/v8/src/wasm/compilation-manager.cc b/deps/v8/src/wasm/compilation-manager.cc
index a19a228f1f..4779a9f423 100644
--- a/deps/v8/src/wasm/compilation-manager.cc
+++ b/deps/v8/src/wasm/compilation-manager.cc
@@ -4,6 +4,7 @@
#include "src/wasm/compilation-manager.h"
#include "src/base/template-utils.h"
+#include "src/wasm/module-compiler.h"
#include "src/objects-inl.h"
@@ -46,6 +47,15 @@ std::shared_ptr<AsyncCompileJob> CompilationManager::RemoveJob(
void CompilationManager::TearDown() { jobs_.clear(); }
+void CompilationManager::AbortAllJobs() {
+ // Iterate over a copy of {jobs_}, because {job->Abort} modifies {jobs_}.
+ std::vector<AsyncCompileJob*> copy;
+
+ for (auto entry : jobs_) copy.push_back(entry.first);
+
+ for (auto job : copy) job->Abort();
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/compilation-manager.h b/deps/v8/src/wasm/compilation-manager.h
index e359b11c26..279f3e872e 100644
--- a/deps/v8/src/wasm/compilation-manager.h
+++ b/deps/v8/src/wasm/compilation-manager.h
@@ -9,12 +9,13 @@
#include "src/handles.h"
#include "src/isolate.h"
-#include "src/wasm/module-compiler.h"
namespace v8 {
namespace internal {
namespace wasm {
+class AsyncCompileJob;
+
// The CompilationManager manages a list of active WebAssembly compile jobs. The
// manager owns the memory of the compile jobs and can trigger the abortion of
// compile jobs. If the isolate tears down, the CompilationManager makes sure
@@ -29,11 +30,17 @@ class CompilationManager {
std::shared_ptr<StreamingDecoder> StartStreamingCompilation(
Isolate* isolate, Handle<Context> context, Handle<JSPromise> promise);
- // Removes {job} from the list of active compile jobs.
+ // Remove {job} from the list of active compile jobs.
std::shared_ptr<AsyncCompileJob> RemoveJob(AsyncCompileJob* job);
+ // Cancel all AsyncCompileJobs and delete their state immediately.
void TearDown();
+ // Cancel all AsyncCompileJobs so that they are not processed any further,
+ // but delay the deletion of their state until all tasks accessing the
+ // AsyncCompileJob finish their execution.
+ void AbortAllJobs();
+
private:
AsyncCompileJob* CreateAsyncCompileJob(Isolate* isolate,
std::unique_ptr<byte[]> bytes_copy,
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 04d918b0a4..98aad07fcb 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -37,6 +37,12 @@ struct WasmException;
return true; \
}())
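+// Flags a validation error if the experimental feature behind {flag} is not
+// enabled, without terminating decoding of the current opcode.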
+#define RET_ON_PROTOTYPE_OPCODE(flag) \
+ DCHECK(!this->module_ || !this->module_->is_asm_js()); \
+ if (!FLAG_experimental_wasm_##flag) { \
+ this->error("Invalid opcode (enable with --experimental-wasm-" #flag ")"); \
+ }
+
#define CHECK_PROTOTYPE_OPCODE(flag) \
DCHECK(!this->module_ || !this->module_->is_asm_js()); \
if (!FLAG_experimental_wasm_##flag) { \
@@ -50,25 +56,25 @@ struct WasmException;
#define ATOMIC_OP_LIST(V) \
V(I32AtomicLoad, Uint32) \
- V(I32AtomicAdd, Uint32) \
- V(I32AtomicSub, Uint32) \
- V(I32AtomicAnd, Uint32) \
- V(I32AtomicOr, Uint32) \
- V(I32AtomicXor, Uint32) \
- V(I32AtomicExchange, Uint32) \
V(I32AtomicLoad8U, Uint8) \
- V(I32AtomicAdd8U, Uint8) \
- V(I32AtomicSub8U, Uint8) \
- V(I32AtomicAnd8U, Uint8) \
- V(I32AtomicOr8U, Uint8) \
- V(I32AtomicXor8U, Uint8) \
- V(I32AtomicExchange8U, Uint8) \
V(I32AtomicLoad16U, Uint16) \
+ V(I32AtomicAdd, Uint32) \
+ V(I32AtomicAdd8U, Uint8) \
V(I32AtomicAdd16U, Uint16) \
+ V(I32AtomicSub, Uint32) \
+ V(I32AtomicSub8U, Uint8) \
V(I32AtomicSub16U, Uint16) \
+ V(I32AtomicAnd, Uint32) \
+ V(I32AtomicAnd8U, Uint8) \
V(I32AtomicAnd16U, Uint16) \
+ V(I32AtomicOr, Uint32) \
+ V(I32AtomicOr8U, Uint8) \
V(I32AtomicOr16U, Uint16) \
+ V(I32AtomicXor, Uint32) \
+ V(I32AtomicXor8U, Uint8) \
V(I32AtomicXor16U, Uint16) \
+ V(I32AtomicExchange, Uint32) \
+ V(I32AtomicExchange8U, Uint8) \
V(I32AtomicExchange16U, Uint16) \
V(I32AtomicCompareExchange, Uint32) \
V(I32AtomicCompareExchange8U, Uint8) \
@@ -246,12 +252,12 @@ struct BreakDepthOperand {
template <Decoder::ValidateFlag validate>
struct CallIndirectOperand {
uint32_t table_index;
- uint32_t index;
+ uint32_t sig_index;
FunctionSig* sig = nullptr;
unsigned length = 0;
inline CallIndirectOperand(Decoder* decoder, const byte* pc) {
unsigned len = 0;
- index = decoder->read_u32v<validate>(pc + 1, &len, "signature index");
+ sig_index = decoder->read_u32v<validate>(pc + 1, &len, "signature index");
if (!VALIDATE(decoder->ok())) return;
table_index = decoder->read_u8<validate>(pc + 1 + len, "table index");
if (!VALIDATE(table_index == 0)) {
@@ -648,7 +654,8 @@ class WasmDecoder : public Decoder {
uint32_t count = decoder->consume_u32v("local count");
if (decoder->failed()) return false;
- if ((count + type_list->size()) > kV8MaxWasmFunctionLocals) {
+ DCHECK_LE(type_list->size(), kV8MaxWasmFunctionLocals);
+ if (count > kV8MaxWasmFunctionLocals - type_list->size()) {
decoder->error(decoder->pc() - 1, "local count too large");
return false;
}
@@ -674,7 +681,7 @@ class WasmDecoder : public Decoder {
type = kWasmS128;
break;
}
- // else fall through to default.
+ V8_FALLTHROUGH;
default:
decoder->error(decoder->pc() - 1, "invalid local type");
return false;
@@ -789,10 +796,10 @@ class WasmDecoder : public Decoder {
inline bool Complete(const byte* pc, CallIndirectOperand<validate>& operand) {
if (!VALIDATE(module_ != nullptr &&
- operand.index < module_->signatures.size())) {
+ operand.sig_index < module_->signatures.size())) {
return false;
}
- operand.sig = module_->signatures[operand.index];
+ operand.sig = module_->signatures[operand.sig_index];
return true;
}
@@ -802,7 +809,7 @@ class WasmDecoder : public Decoder {
return false;
}
if (!Complete(pc, operand)) {
- errorf(pc + 1, "invalid signature index: #%u", operand.index);
+ errorf(pc + 1, "invalid signature index: #%u", operand.sig_index);
return false;
}
return true;
@@ -1097,6 +1104,7 @@ class WasmDecoder : public Decoder {
}
}
}
+ V8_FALLTHROUGH;
}
default:
V8_Fatal(__FILE__, __LINE__, "unimplemented opcode: %x (%s)", opcode,
@@ -1534,8 +1542,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!this->Validate(this->pc_, operand, control_.size())) break;
Control* c = control_at(operand.depth);
if (!TypeCheckBreak(c)) break;
- CALL_INTERFACE_IF_REACHABLE(Br, c);
- BreakTo(c);
+ if (control_.back().reachable()) {
+ CALL_INTERFACE(Br, c);
+ c->br_merge()->reached = true;
+ }
len = 1 + operand.length;
EndControl();
break;
@@ -1543,28 +1553,38 @@ class WasmFullDecoder : public WasmDecoder<validate> {
case kExprBrIf: {
BreakDepthOperand<validate> operand(this, this->pc_);
auto cond = Pop(0, kWasmI32);
+ if (this->failed()) break;
if (!this->Validate(this->pc_, operand, control_.size())) break;
Control* c = control_at(operand.depth);
if (!TypeCheckBreak(c)) break;
- CALL_INTERFACE_IF_REACHABLE(BrIf, cond, c);
- BreakTo(c);
+ if (control_.back().reachable()) {
+ CALL_INTERFACE(BrIf, cond, c);
+ c->br_merge()->reached = true;
+ }
len = 1 + operand.length;
break;
}
case kExprBrTable: {
BranchTableOperand<validate> operand(this, this->pc_);
BranchTableIterator<validate> iterator(this, operand);
- if (!this->Validate(this->pc_, operand, control_.size())) break;
auto key = Pop(0, kWasmI32);
+ if (this->failed()) break;
+ if (!this->Validate(this->pc_, operand, control_.size())) break;
uint32_t br_arity = 0;
+ std::vector<bool> br_targets(control_.size());
while (iterator.has_next()) {
const uint32_t i = iterator.cur_index();
const byte* pos = iterator.pc();
uint32_t target = iterator.next();
if (!VALIDATE(target < control_.size())) {
- this->error(pos, "improper branch in br_table");
+ this->errorf(pos,
+ "improper branch in br_table target %u (depth %u)",
+ i, target);
break;
}
+ // Avoid redundant break target checks.
+ if (br_targets[target]) continue;
+ br_targets[target] = true;
// Check that label types match up.
Control* c = control_at(target);
uint32_t arity = c->br_merge()->arity;
@@ -1572,15 +1592,22 @@ class WasmFullDecoder : public WasmDecoder<validate> {
br_arity = arity;
} else if (!VALIDATE(br_arity == arity)) {
this->errorf(pos,
- "inconsistent arity in br_table target %d"
+ "inconsistent arity in br_table target %u"
" (previous was %u, this one %u)",
i, br_arity, arity);
}
if (!TypeCheckBreak(c)) break;
- BreakTo(c);
}
+ if (this->failed()) break;
+
+ if (control_.back().reachable()) {
+ CALL_INTERFACE(BrTable, operand, key);
- CALL_INTERFACE_IF_REACHABLE(BrTable, operand, key);
+ for (uint32_t depth = control_depth(); depth-- > 0;) {
+ if (!br_targets[depth]) continue;
+ control_at(depth)->br_merge()->reached = true;
+ }
+ }
len = 1 + iterator.length();
EndControl();
@@ -2249,10 +2276,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
int startrel(const byte* ptr) { return static_cast<int>(ptr - this->start_); }
- inline void BreakTo(Control* c) {
- if (control_.back().reachable()) c->br_merge()->reached = true;
- }
-
void FallThruTo(Control* c) {
DCHECK_EQ(c, &control_.back());
if (!TypeCheckFallThru(c)) return;
@@ -2344,6 +2367,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
inline void BuildSimpleOperator(WasmOpcode opcode, FunctionSig* sig) {
+ if (WasmOpcodes::IsSignExtensionOpcode(opcode)) {
+ RET_ON_PROTOTYPE_OPCODE(se);
+ }
switch (sig->parameter_count()) {
case 1: {
auto val = Pop(0, sig->GetParam(0));
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index 57ee78f91c..217a5ff3b1 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -369,13 +369,13 @@ class WasmGraphBuildingInterface {
void CallDirect(Decoder* decoder,
const CallFunctionOperand<validate>& operand,
const Value args[], Value returns[]) {
- DoCall(decoder, nullptr, operand, args, returns, false);
+ DoCall(decoder, nullptr, operand.sig, operand.index, args, returns);
}
void CallIndirect(Decoder* decoder, const Value& index,
const CallIndirectOperand<validate>& operand,
const Value args[], Value returns[]) {
- DoCall(decoder, index.node, operand, args, returns, true);
+ DoCall(decoder, index.node, operand.sig, operand.sig_index, args, returns);
}
void SimdOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args,
@@ -782,30 +782,29 @@ class WasmGraphBuildingInterface {
return result;
}
- template <typename Operand>
void DoCall(WasmFullDecoder<validate, WasmGraphBuildingInterface>* decoder,
- TFNode* index_node, const Operand& operand, const Value args[],
- Value returns[], bool is_indirect) {
- int param_count = static_cast<int>(operand.sig->parameter_count());
+ TFNode* index_node, FunctionSig* sig, uint32_t index,
+ const Value args[], Value returns[]) {
+ int param_count = static_cast<int>(sig->parameter_count());
TFNode** arg_nodes = builder_->Buffer(param_count + 1);
TFNode** return_nodes = nullptr;
arg_nodes[0] = index_node;
for (int i = 0; i < param_count; ++i) {
arg_nodes[i + 1] = args[i].node;
}
- if (is_indirect) {
- builder_->CallIndirect(operand.index, arg_nodes, &return_nodes,
+ if (index_node) {
+ builder_->CallIndirect(index, arg_nodes, &return_nodes,
decoder->position());
} else {
- builder_->CallDirect(operand.index, arg_nodes, &return_nodes,
+ builder_->CallDirect(index, arg_nodes, &return_nodes,
decoder->position());
}
- int return_count = static_cast<int>(operand.sig->return_count());
+ int return_count = static_cast<int>(sig->return_count());
for (int i = 0; i < return_count; ++i) {
returns[i].node = return_nodes[i];
}
// The invoked function could have used grow_memory, so we need to
- // reload mem_size and mem_start
+ // reload mem_size and mem_start.
LoadContextIntoSsa(ssa_env_);
}
};
@@ -1002,7 +1001,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
}
case kExprCallIndirect: {
CallIndirectOperand<Decoder::kNoValidate> operand(&i, i.pc());
- os << " // sig #" << operand.index;
+ os << " // sig #" << operand.sig_index;
if (decoder.Complete(i.pc(), operand)) {
os << ": " << *operand.sig;
}
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index 4a2e610b99..0a09feddf2 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -207,18 +207,12 @@ class ModuleCompiler {
compiler::ModuleEnv* module_env,
ErrorThrower* thrower);
- static MaybeHandle<WasmModuleObject> CompileToModuleObject(
- Isolate* isolate, ErrorThrower* thrower,
- std::unique_ptr<WasmModule> module, const ModuleWireBytes& wire_bytes,
- Handle<Script> asm_js_script,
- Vector<const byte> asm_js_offset_table_bytes);
-
- private:
MaybeHandle<WasmModuleObject> CompileToModuleObjectInternal(
ErrorThrower* thrower, std::unique_ptr<WasmModule> module,
const ModuleWireBytes& wire_bytes, Handle<Script> asm_js_script,
Vector<const byte> asm_js_offset_table_bytes);
+ private:
Isolate* isolate_;
WasmModule* module_;
const std::shared_ptr<Counters> async_counters_;
@@ -268,7 +262,7 @@ class JSToWasmWrapperCache {
target->builtin_index() == Builtins::kIllegal ||
target->builtin_index() == Builtins::kWasmCompileLazy) {
it.rinfo()->set_target_address(
- isolate, wasm_code.GetCode()->instruction_start());
+ wasm_code.GetCode()->instruction_start());
break;
}
}
@@ -277,9 +271,9 @@ class JSToWasmWrapperCache {
RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
DCHECK(!it.done());
it.rinfo()->set_js_to_wasm_address(
- isolate, wasm_code.is_null()
- ? nullptr
- : wasm_code.GetWasmCode()->instructions().start());
+ wasm_code.is_null()
+ ? nullptr
+ : wasm_code.GetWasmCode()->instructions().start());
}
return code;
}
@@ -308,11 +302,12 @@ class InstanceBuilder {
InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
Handle<WasmModuleObject> module_object,
MaybeHandle<JSReceiver> ffi,
- MaybeHandle<JSArrayBuffer> memory,
- WeakCallbackInfo<void>::Callback instance_finalizer_callback);
+ MaybeHandle<JSArrayBuffer> memory);
// Build an instance, in all of its glory.
MaybeHandle<WasmInstanceObject> Build();
+ // Run the start function, if any.
+ bool ExecuteStartFunction();
private:
// Represents the initialized state of a table.
@@ -340,8 +335,8 @@ class InstanceBuilder {
Handle<WasmCompiledModule> compiled_module_;
std::vector<TableInstance> table_instances_;
std::vector<Handle<JSFunction>> js_wrappers_;
+ Handle<WasmExportedFunction> start_function_;
JSToWasmWrapperCache js_to_wasm_cache_;
- WeakCallbackInfo<void>::Callback instance_finalizer_callback_;
std::vector<SanitizedImport> sanitized_imports_;
const std::shared_ptr<Counters>& async_counters() const {
@@ -424,91 +419,6 @@ class InstanceBuilder {
Handle<WasmInstanceObject> instance);
};
-// TODO(titzer): move to wasm-objects.cc
-void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
- DisallowHeapAllocation no_gc;
- JSObject** p = reinterpret_cast<JSObject**>(data.GetParameter());
- WasmInstanceObject* owner = reinterpret_cast<WasmInstanceObject*>(*p);
- Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
- // If a link to shared memory instances exists, update the list of memory
- // instances before the instance is destroyed.
- WasmCompiledModule* compiled_module = owner->compiled_module();
- wasm::NativeModule* native_module = compiled_module->GetNativeModule();
- if (FLAG_wasm_jit_to_native) {
- if (native_module) {
- TRACE("Finalizing %zu {\n", native_module->instance_id);
- } else {
- TRACE("Finalized already cleaned up compiled module\n");
- }
- } else {
- TRACE("Finalizing %d {\n", compiled_module->instance_id());
-
- if (compiled_module->use_trap_handler()) {
- // TODO(6792): No longer needed once WebAssembly code is off heap.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
- DisallowHeapAllocation no_gc;
- FixedArray* code_table = compiled_module->code_table();
- for (int i = 0; i < code_table->length(); ++i) {
- Code* code = Code::cast(code_table->get(i));
- int index = code->trap_handler_index()->value();
- if (index >= 0) {
- trap_handler::ReleaseHandlerData(index);
- code->set_trap_handler_index(
- Smi::FromInt(trap_handler::kInvalidIndex));
- }
- }
- }
- }
- WeakCell* weak_wasm_module = compiled_module->weak_wasm_module();
-
- // Since the order of finalizers is not guaranteed, it can be the case
- // that {instance->compiled_module()->module()}, which is a
- // {Managed<WasmModule>} has been collected earlier in this GC cycle.
- // Weak references to this instance won't be cleared until
- // the next GC cycle, so we need to manually break some links (such as
- // the weak references from {WasmMemoryObject::instances}.
- if (owner->has_memory_object()) {
- Handle<WasmMemoryObject> memory(owner->memory_object(), isolate);
- Handle<WasmInstanceObject> instance(owner, isolate);
- WasmMemoryObject::RemoveInstance(isolate, memory, instance);
- }
-
- // weak_wasm_module may have been cleared, meaning the module object
- // was GC-ed. We still want to maintain the links between instances, to
- // release the WasmCompiledModule corresponding to the WasmModuleInstance
- // being finalized here.
- WasmModuleObject* wasm_module = nullptr;
- if (!weak_wasm_module->cleared()) {
- wasm_module = WasmModuleObject::cast(weak_wasm_module->value());
- WasmCompiledModule* current_template = wasm_module->compiled_module();
-
- TRACE("chain before {\n");
- TRACE_CHAIN(current_template);
- TRACE("}\n");
-
- DCHECK(!current_template->has_prev_instance());
- if (current_template == compiled_module) {
- if (!compiled_module->has_next_instance()) {
- WasmCompiledModule::Reset(isolate, compiled_module);
- } else {
- WasmModuleObject::cast(wasm_module)
- ->set_compiled_module(compiled_module->next_instance());
- }
- }
- }
-
- compiled_module->RemoveFromChain();
-
- if (wasm_module != nullptr) {
- TRACE("chain after {\n");
- TRACE_CHAIN(wasm_module->compiled_module());
- TRACE("}\n");
- }
- compiled_module->reset_weak_owning_instance();
- GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
- TRACE("}\n");
-}
-
// This is used in ProcessImports.
// When importing other modules' exports, we need to ask
// the exporter for a WasmToWasm wrapper. To do that, we need to
@@ -517,8 +427,9 @@ void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
class SetOfNativeModuleModificationScopes final {
public:
void Add(NativeModule* module) {
- module->SetExecutable(false);
- native_modules_.insert(module);
+ if (native_modules_.insert(module).second) {
+ module->SetExecutable(false);
+ }
}
~SetOfNativeModuleModificationScopes() {
@@ -531,138 +442,28 @@ class SetOfNativeModuleModificationScopes final {
std::unordered_set<NativeModule*> native_modules_;
};
-} // namespace
-
-MaybeHandle<WasmModuleObject> SyncCompileTranslatedAsmJs(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
- Handle<Script> asm_js_script,
- Vector<const byte> asm_js_offset_table_bytes) {
- ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
- bytes.end(), false, kAsmJsOrigin);
- if (result.failed()) {
- thrower->CompileFailed("Wasm decoding failed", result);
- return {};
+void EnsureWasmContextTable(WasmContext* wasm_context, int table_size) {
+ if (wasm_context->table) return;
+ wasm_context->table_size = table_size;
+ wasm_context->table = reinterpret_cast<IndirectFunctionTableEntry*>(
+ calloc(table_size, sizeof(IndirectFunctionTableEntry)));
+ for (int i = 0; i < table_size; i++) {
+ wasm_context->table[i].sig_id = kInvalidSigIndex;
}
-
- // Transfer ownership of the WasmModule to the {WasmModuleWrapper} generated
- // in {CompileToModuleObject}.
- return ModuleCompiler::CompileToModuleObject(
- isolate, thrower, std::move(result.val), bytes, asm_js_script,
- asm_js_offset_table_bytes);
}
-MaybeHandle<WasmModuleObject> SyncCompile(Isolate* isolate,
- ErrorThrower* thrower,
- const ModuleWireBytes& bytes) {
- ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
- bytes.end(), false, kWasmOrigin);
- if (result.failed()) {
- thrower->CompileFailed("Wasm decoding failed", result);
- return {};
- }
-
- // Transfer ownership of the WasmModule to the {WasmModuleWrapper} generated
- // in {CompileToModuleObject}.
- return ModuleCompiler::CompileToModuleObject(
- isolate, thrower, std::move(result.val), bytes, Handle<Script>(),
- Vector<const byte>());
-}
+} // namespace
-MaybeHandle<WasmInstanceObject> SyncInstantiate(
+MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
Isolate* isolate, ErrorThrower* thrower,
Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
MaybeHandle<JSArrayBuffer> memory) {
- InstanceBuilder builder(isolate, thrower, module_object, imports, memory,
- &InstanceFinalizer);
- return builder.Build();
-}
-
-MaybeHandle<WasmInstanceObject> SyncCompileAndInstantiate(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
- MaybeHandle<JSReceiver> imports, MaybeHandle<JSArrayBuffer> memory) {
- MaybeHandle<WasmModuleObject> module = SyncCompile(isolate, thrower, bytes);
- DCHECK_EQ(thrower->error(), module.is_null());
- if (module.is_null()) return {};
-
- return SyncInstantiate(isolate, thrower, module.ToHandleChecked(), imports,
- memory);
-}
-
-void RejectPromise(Isolate* isolate, Handle<Context> context,
- ErrorThrower& thrower, Handle<JSPromise> promise) {
- Local<Promise::Resolver> resolver =
- Utils::PromiseToLocal(promise).As<Promise::Resolver>();
- auto maybe = resolver->Reject(Utils::ToLocal(context),
- Utils::ToLocal(thrower.Reify()));
- CHECK_IMPLIES(!maybe.FromMaybe(false), isolate->has_scheduled_exception());
-}
-
-void ResolvePromise(Isolate* isolate, Handle<Context> context,
- Handle<JSPromise> promise, Handle<Object> result) {
- Local<Promise::Resolver> resolver =
- Utils::PromiseToLocal(promise).As<Promise::Resolver>();
- auto maybe =
- resolver->Resolve(Utils::ToLocal(context), Utils::ToLocal(result));
- CHECK_IMPLIES(!maybe.FromMaybe(false), isolate->has_scheduled_exception());
-}
-
-void AsyncInstantiate(Isolate* isolate, Handle<JSPromise> promise,
- Handle<WasmModuleObject> module_object,
- MaybeHandle<JSReceiver> imports) {
- ErrorThrower thrower(isolate, nullptr);
- MaybeHandle<WasmInstanceObject> instance_object = SyncInstantiate(
- isolate, &thrower, module_object, imports, Handle<JSArrayBuffer>::null());
- if (thrower.error()) {
- RejectPromise(isolate, handle(isolate->context()), thrower, promise);
- return;
+ InstanceBuilder builder(isolate, thrower, module_object, imports, memory);
+ auto instance = builder.Build();
+ if (!instance.is_null() && builder.ExecuteStartFunction()) {
+ return instance;
}
- ResolvePromise(isolate, handle(isolate->context()), promise,
- instance_object.ToHandleChecked());
-}
-
-void AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
- const ModuleWireBytes& bytes, bool is_shared) {
- if (!FLAG_wasm_async_compilation) {
- // Asynchronous compilation disabled; fall back on synchronous compilation.
- ErrorThrower thrower(isolate, "WasmCompile");
- MaybeHandle<WasmModuleObject> module_object;
- if (is_shared) {
- // Make a copy of the wire bytes to avoid concurrent modification.
- std::unique_ptr<uint8_t[]> copy(new uint8_t[bytes.length()]);
- memcpy(copy.get(), bytes.start(), bytes.length());
- i::wasm::ModuleWireBytes bytes_copy(copy.get(),
- copy.get() + bytes.length());
- module_object = SyncCompile(isolate, &thrower, bytes_copy);
- } else {
- // The wire bytes are not shared, OK to use them directly.
- module_object = SyncCompile(isolate, &thrower, bytes);
- }
- if (thrower.error()) {
- RejectPromise(isolate, handle(isolate->context()), thrower, promise);
- return;
- }
- Handle<WasmModuleObject> module = module_object.ToHandleChecked();
- ResolvePromise(isolate, handle(isolate->context()), promise, module);
- return;
- }
-
- if (FLAG_wasm_test_streaming) {
- std::shared_ptr<StreamingDecoder> streaming_decoder =
- isolate->wasm_engine()
- ->compilation_manager()
- ->StartStreamingCompilation(isolate, handle(isolate->context()),
- promise);
- streaming_decoder->OnBytesReceived(bytes.module_bytes());
- streaming_decoder->Finish();
- return;
- }
- // Make a copy of the wire bytes in case the user program changes them
- // during asynchronous compilation.
- std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
- memcpy(copy.get(), bytes.start(), bytes.length());
- isolate->wasm_engine()->compilation_manager()->StartAsyncCompileJob(
- isolate, std::move(copy), bytes.length(), handle(isolate->context()),
- promise);
+ return {};
}
Handle<Code> CompileLazyOnGCHeap(Isolate* isolate) {
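// Illustrative sketch (not part of the patch): instance building and running
// the start function are now two separate steps. InstanceBuilder::Build()
// runs with JS execution disallowed and only creates a wrapper for the start
// function; the caller invokes ExecuteStartFunction() afterwards, as
// InstantiateToInstanceObject() earlier in this file does. InstantiateAndStart
// is a hypothetical wrapper name used only for illustration.
MaybeHandle<WasmInstanceObject> InstantiateAndStart(
    Isolate* isolate, ErrorThrower* thrower,
    Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
    MaybeHandle<JSArrayBuffer> memory) {
  InstanceBuilder builder(isolate, thrower, module_object, imports, memory);
  MaybeHandle<WasmInstanceObject> instance = builder.Build();
  // Run the start function only once the instance is fully set up; a failure
  // leaves a pending exception on the isolate and yields an empty handle.
  if (instance.is_null() || !builder.ExecuteStartFunction()) return {};
  return instance;
}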
@@ -845,6 +646,7 @@ Address CompileLazy(Isolate* isolate) {
int func_index = static_cast<int>(result->index());
if (!exp_deopt_data_entry.is_null() && exp_deopt_data_entry->IsFixedArray()) {
+ int patched = 0;
Handle<FixedArray> exp_deopt_data =
Handle<FixedArray>::cast(exp_deopt_data_entry);
@@ -854,22 +656,36 @@ Address CompileLazy(Isolate* isolate) {
// See EnsureExportedLazyDeoptData: exp_deopt_data[0...(len-1)] are pairs
// of <export_table, index> followed by undefined values. Use this
// information here to patch all export tables.
+ Address target = result->instructions().start();
Handle<Foreign> foreign_holder =
- isolate->factory()->NewForeign(result->instructions().start(), TENURED);
+ isolate->factory()->NewForeign(target, TENURED);
for (int idx = 0, end = exp_deopt_data->length(); idx < end; idx += 2) {
if (exp_deopt_data->get(idx)->IsUndefined(isolate)) break;
DisallowHeapAllocation no_gc;
int exp_index = Smi::ToInt(exp_deopt_data->get(idx + 1));
FixedArray* exp_table = FixedArray::cast(exp_deopt_data->get(idx));
- exp_table->set(compiler::FunctionTableCodeOffset(exp_index),
- *foreign_holder);
+
+ if (WASM_CONTEXT_TABLES) {
+ // TODO(titzer): patching of function tables for lazy compilation
+ // only works for a single instance.
+ instance->wasm_context()->get()->table[exp_index].target = target;
+ } else {
+ int table_index = compiler::FunctionTableCodeOffset(exp_index);
+ DCHECK_EQ(Foreign::cast(exp_table->get(table_index))->foreign_address(),
+ lazy_stub_or_copy->instructions().start());
+
+ exp_table->set(table_index, *foreign_holder);
+ ++patched;
+ }
}
- // TODO(6792): No longer needed once WebAssembly code is off heap.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
// After processing, remove the list of exported entries, such that we don't
// do the patching redundantly.
compiled_module->lazy_compile_data()->set(
func_index, isolate->heap()->undefined_value());
+ if (!WASM_CONTEXT_TABLES) {
+ DCHECK_LT(0, patched);
+ USE(patched);
+ }
}
return result->instructions().start();
@@ -880,8 +696,7 @@ compiler::ModuleEnv CreateModuleEnvFromCompiledModule(
DisallowHeapAllocation no_gc;
WasmModule* module = compiled_module->shared()->module();
if (FLAG_wasm_jit_to_native) {
- NativeModule* native_module = compiled_module->GetNativeModule();
- compiler::ModuleEnv result(module, native_module->function_tables(),
+ compiler::ModuleEnv result(module, std::vector<Address>{},
std::vector<Handle<Code>>{},
BUILTIN_CODE(isolate, WasmCompileLazy),
compiled_module->use_trap_handler());
@@ -911,6 +726,20 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFunction(
compilation_timer.Start();
Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
isolate);
+
+ // TODO(wasm): Refactor this to only get the name if it is really needed for
+ // tracing / debugging.
+ std::string func_name;
+ {
+ WasmName name = Vector<const char>::cast(
+ compiled_module->shared()->GetRawFunctionName(func_index));
+ // Copy to std::string, because the underlying string object might move on
+ // the heap.
+ func_name.assign(name.start(), static_cast<size_t>(name.length()));
+ }
+
+ TRACE_LAZY("Compiling function %s, %d.\n", func_name.c_str(), func_index);
+
if (FLAG_wasm_jit_to_native) {
wasm::WasmCode* existing_code = compiled_module->GetNativeModule()->GetCode(
static_cast<uint32_t>(func_index));
@@ -937,16 +766,7 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFunction(
FunctionBody body{func->sig, func->code.offset(),
module_start + func->code.offset(),
module_start + func->code.end_offset()};
- // TODO(wasm): Refactor this to only get the name if it is really needed for
- // tracing / debugging.
- std::string func_name;
- {
- WasmName name = Vector<const char>::cast(
- compiled_module->shared()->GetRawFunctionName(func_index));
- // Copy to std::string, because the underlying string object might move on
- // the heap.
- func_name.assign(name.start(), static_cast<size_t>(name.length()));
- }
+
ErrorThrower thrower(isolate, "WasmLazyCompile");
compiler::WasmCompilationUnit unit(isolate, &module_env,
compiled_module->GetNativeModule(), body,
@@ -989,7 +809,7 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFunction(
if (!code_wrapper.IsCodeObject()) {
const wasm::WasmCode* wasm_code = code_wrapper.GetWasmCode();
- Assembler::FlushICache(isolate, wasm_code->instructions().start(),
+ Assembler::FlushICache(wasm_code->instructions().start(),
wasm_code->instructions().size());
counters->wasm_generated_code_size()->Increment(
static_cast<int>(wasm_code->instructions().size()));
@@ -997,8 +817,7 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFunction(
static_cast<int>(wasm_code->reloc_info().size()));
} else {
- Assembler::FlushICache(isolate, code->instruction_start(),
- code->instruction_size());
+ Assembler::FlushICache(code->instruction_start(), code->instruction_size());
counters->wasm_generated_code_size()->Increment(code->body_size());
counters->wasm_reloc_size()->Increment(code->relocation_info()->length());
}
@@ -1062,8 +881,12 @@ const WasmCode* WasmExtractWasmToWasmCallee(const WasmCodeManager* code_manager,
wasm_to_wasm->constant_pool(), \
RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL)); \
DCHECK(!it.done()); \
- it.rinfo()->set_js_to_wasm_address(isolate, \
- new_target->instructions().start()); \
+ DCHECK_EQ(WasmCode::kLazyStub, \
+ isolate->wasm_engine() \
+ ->code_manager() \
+ ->GetCodeFromStartAddress(it.rinfo()->js_to_wasm_address()) \
+ ->kind()); \
+ it.rinfo()->set_js_to_wasm_address(new_target->instructions().start()); \
it.next(); \
DCHECK(it.done()); \
} while (0)
@@ -1077,7 +900,7 @@ void PatchWasmToWasmWrapper(Isolate* isolate, Code* wasm_to_wasm,
DCHECK_EQ(Builtins::kWasmCompileLazy,
Code::GetCodeFromTargetAddress(it.rinfo()->target_address())
->builtin_index());
- it.rinfo()->set_target_address(isolate, new_target->instruction_start());
+ it.rinfo()->set_target_address(new_target->instruction_start());
#ifdef DEBUG
it.next();
DCHECK(it.done());
@@ -1169,8 +992,6 @@ Handle<Code> LazyCompilationOrchestrator::CompileLazyOnGCHeap(
DCHECK(!non_compiled_functions.empty() || !wasm_to_wasm_callee.is_null());
}
- TRACE_LAZY("Compiling function %d.\n", func_to_return_idx);
-
// TODO(clemensh): compile all functions in non_compiled_functions in
// background, wait for func_to_return_idx.
CompileFunction(isolate, instance, func_to_return_idx);
@@ -1224,8 +1045,7 @@ Handle<Code> LazyCompilationOrchestrator::CompileLazyOnGCHeap(
continue;
}
DCHECK_EQ(Code::WASM_FUNCTION, callee_compiled->kind());
- it.rinfo()->set_target_address(isolate,
- callee_compiled->instruction_start());
+ it.rinfo()->set_target_address(callee_compiled->instruction_start());
++patched;
}
DCHECK_EQ(non_compiled_functions.size(), idx);
@@ -1251,6 +1071,7 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFromJsToWasm(
CompileFunction(isolate, instance, exported_func_index);
{
DisallowHeapAllocation no_gc;
+ int patched = 0;
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
RelocIterator it(*js_to_wasm_caller,
RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
@@ -1263,10 +1084,21 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFromJsToWasm(
DCHECK_NOT_NULL(callee_compiled);
if (current_callee->kind() == WasmCode::kWasmToWasmWrapper) {
WasmPatchWasmToWasmWrapper(isolate, current_callee, callee_compiled);
+ ++patched;
} else {
+ DCHECK_EQ(WasmCode::kLazyStub,
+ isolate->wasm_engine()
+ ->code_manager()
+ ->GetCodeFromStartAddress(it.rinfo()->js_to_wasm_address())
+ ->kind());
it.rinfo()->set_js_to_wasm_address(
- isolate, callee_compiled->instructions().start());
+ callee_compiled->instructions().start());
+ ++patched;
}
+ DCHECK_LT(0, patched);
+ TRACE_LAZY("Patched %d location(s) in the caller.\n", patched);
+ USE(patched);
+
#ifdef DEBUG
it.next();
DCHECK(it.done());
@@ -1313,6 +1145,7 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
->module()
->functions[caller_func_index]
.code.offset();
+ int num_non_compiled_functions = 0;
for (RelocIterator it(wasm_caller->instructions(),
wasm_caller->reloc_info(),
wasm_caller->constant_pool(),
@@ -1333,6 +1166,8 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
non_compiled_functions.push_back(Nothing<uint32_t>());
continue;
}
+ ++num_non_compiled_functions;
+
uint32_t called_func_index =
ExtractDirectCallIndex(decoder, func_bytes + byte_pos);
DCHECK_LT(called_func_index,
@@ -1344,6 +1179,10 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
maybe_func_to_return_idx = Just(called_func_index);
}
}
+
+ TRACE_LAZY("Found %d non-compiled functions in caller.\n",
+ num_non_compiled_functions);
+ USE(num_non_compiled_functions);
}
uint32_t func_to_return_idx = 0;
@@ -1365,10 +1204,12 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
const WasmCode* ret = CompileFunction(isolate, instance, func_to_return_idx);
DCHECK_NOT_NULL(ret);
+ int patched = 0;
if (last_callee->kind() == WasmCode::kWasmToWasmWrapper) {
// We can finish it all here by compiling the target wasm function and
// patching the wasm_to_wasm caller.
WasmPatchWasmToWasmWrapper(isolate, last_callee, ret);
+ ++patched;
} else {
Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
isolate);
@@ -1376,7 +1217,6 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
// Now patch the code object with all functions which are now compiled. This
// will pick up any other compiled functions, not only {ret}.
size_t idx = 0;
- size_t patched = 0;
for (RelocIterator
it(wasm_caller->instructions(), wasm_caller->reloc_info(),
wasm_caller->constant_pool(),
@@ -1388,13 +1228,22 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
const WasmCode* callee_compiled =
compiled_module->GetNativeModule()->GetCode(lookup);
if (callee_compiled->kind() != WasmCode::kFunction) continue;
+ DCHECK_EQ(WasmCode::kLazyStub,
+ isolate->wasm_engine()
+ ->code_manager()
+ ->GetCodeFromStartAddress(it.rinfo()->wasm_call_address())
+ ->kind());
it.rinfo()->set_wasm_call_address(
- isolate, callee_compiled->instructions().start());
+ callee_compiled->instructions().start());
++patched;
}
DCHECK_EQ(non_compiled_functions.size(), idx);
- TRACE_LAZY("Patched %zu location(s) in the caller.\n", patched);
}
+
+ DCHECK_LT(0, patched);
+ TRACE_LAZY("Patched %d location(s) in the caller.\n", patched);
+ USE(patched);
+
return ret;
}
@@ -1679,8 +1528,7 @@ void ModuleCompiler::ValidateSequentially(const ModuleWireBytes& wire_bytes,
}
}
-// static
-MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObject(
+MaybeHandle<WasmModuleObject> CompileToModuleObject(
Isolate* isolate, ErrorThrower* thrower, std::unique_ptr<WasmModule> module,
const ModuleWireBytes& wire_bytes, Handle<Script> asm_js_script,
Vector<const byte> asm_js_offset_table_bytes) {
@@ -1703,21 +1551,20 @@ bool compile_lazy(const WasmModule* module) {
(FLAG_asm_wasm_lazy_compilation && module->is_asm_js());
}
-void FlushICache(Isolate* isolate, const wasm::NativeModule* native_module) {
+void FlushICache(const wasm::NativeModule* native_module) {
for (uint32_t i = 0, e = native_module->FunctionCount(); i < e; ++i) {
const wasm::WasmCode* code = native_module->GetCode(i);
if (code == nullptr) continue;
- Assembler::FlushICache(isolate, code->instructions().start(),
+ Assembler::FlushICache(code->instructions().start(),
code->instructions().size());
}
}
-void FlushICache(Isolate* isolate, Handle<FixedArray> functions) {
+void FlushICache(Handle<FixedArray> functions) {
for (int i = 0, e = functions->length(); i < e; ++i) {
if (!functions->get(i)->IsCode()) continue;
Code* code = Code::cast(functions->get(i));
- Assembler::FlushICache(isolate, code->instruction_start(),
- code->instruction_size());
+ Assembler::FlushICache(code->instruction_start(), code->instruction_size());
}
}
@@ -1811,7 +1658,8 @@ WasmCodeWrapper EnsureExportedLazyDeoptData(Isolate* isolate,
return WasmCodeWrapper(code);
}
// Clone the lazy builtin into the native module.
- return WasmCodeWrapper(native_module->CloneLazyBuiltinInto(func_index));
+ return WasmCodeWrapper(
+ native_module->CloneLazyBuiltinInto(code, func_index));
}
}
@@ -1825,7 +1673,7 @@ WasmCodeWrapper EnsureTableExportLazyDeoptData(
Isolate* isolate, Handle<WasmInstanceObject> instance,
Handle<FixedArray> code_table, wasm::NativeModule* native_module,
uint32_t func_index, Handle<FixedArray> export_table, int export_index,
- std::unordered_map<uint32_t, uint32_t>* table_export_count) {
+ std::unordered_map<uint32_t, uint32_t>* num_table_exports) {
if (!FLAG_wasm_jit_to_native) {
Handle<Code> code =
EnsureExportedLazyDeoptData(isolate, instance, code_table,
@@ -1845,10 +1693,10 @@ WasmCodeWrapper EnsureTableExportLazyDeoptData(
// [#4: export table
// #5: export table index]
// ...
- // table_export_count counts down and determines the index for the new
+ // num_table_exports counts down and determines the index for the new
// export table entry.
- auto table_export_entry = table_export_count->find(func_index);
- DCHECK(table_export_entry != table_export_count->end());
+ auto table_export_entry = num_table_exports->find(func_index);
+ DCHECK(table_export_entry != num_table_exports->end());
DCHECK_LT(0, table_export_entry->second);
uint32_t this_idx = 2 * table_export_entry->second;
--table_export_entry->second;
@@ -1881,10 +1729,10 @@ WasmCodeWrapper EnsureTableExportLazyDeoptData(
// [#2: export table
// #3: export table index]
// ...
- // table_export_count counts down and determines the index for the new
+ // num_table_exports counts down and determines the index for the new
// export table entry.
- auto table_export_entry = table_export_count->find(func_index);
- DCHECK(table_export_entry != table_export_count->end());
+ auto table_export_entry = num_table_exports->find(func_index);
+ DCHECK(table_export_entry != num_table_exports->end());
DCHECK_LT(0, table_export_entry->second);
--table_export_entry->second;
uint32_t this_idx = 2 * table_export_entry->second;
@@ -2192,19 +2040,17 @@ MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObjectInternal(
return result;
}
-InstanceBuilder::InstanceBuilder(
- Isolate* isolate, ErrorThrower* thrower,
- Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> ffi,
- MaybeHandle<JSArrayBuffer> memory,
- WeakCallbackInfo<void>::Callback instance_finalizer_callback)
+InstanceBuilder::InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> module_object,
+ MaybeHandle<JSReceiver> ffi,
+ MaybeHandle<JSArrayBuffer> memory)
: isolate_(isolate),
module_(module_object->compiled_module()->shared()->module()),
async_counters_(isolate->async_counters()),
thrower_(thrower),
module_object_(module_object),
ffi_(ffi),
- memory_(memory),
- instance_finalizer_callback_(instance_finalizer_callback) {
+ memory_(memory) {
sanitized_imports_.reserve(module_->import_table.size());
}
@@ -2222,12 +2068,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
if (thrower_->error()) return {};
// TODO(6792): No longer needed once WebAssembly code is off heap.
- // Use base::Optional to be able to close the scope before executing the start
- // function.
- base::Optional<CodeSpaceMemoryModificationScope> modification_scope(
- base::in_place_t(), isolate_->heap());
+ CodeSpaceMemoryModificationScope modification_scope(isolate_->heap());
// From here on, we expect the build pipeline to run without exiting to JS.
- // Exception is when we run the startup function.
DisallowJavascriptExecution no_js(isolate_);
// Record build time into correct bucket, then build instance.
TimedHistogramScope wasm_instantiate_module_time_scope(
@@ -2238,14 +2080,10 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
// Reuse the compiled module (if no owner), otherwise clone.
//--------------------------------------------------------------------------
- // TODO(mtrofin): remove code_table and old_code_table
+ // TODO(mtrofin): remove code_table
// when FLAG_wasm_jit_to_native is not needed
Handle<FixedArray> code_table;
Handle<FixedArray> wrapper_table;
- // We keep around a copy of the old code table, because we'll be replacing
- // imports for the new instance, and then we need the old imports to be
- // able to relocate.
- Handle<FixedArray> old_code_table;
MaybeHandle<WasmInstanceObject> owner;
// native_module is the one we're building now, old_module
// is the one we clone from. They point to the same place if
@@ -2284,7 +2122,6 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
wrapper_table = handle(compiled_module_->export_wrappers(), isolate_);
} else {
TRACE("Cloning from %d\n", original->instance_id());
- old_code_table = handle(original->code_table(), isolate_);
compiled_module_ = WasmCompiledModule::Clone(isolate_, original);
code_table = handle(compiled_module_->code_table(), isolate_);
wrapper_table = handle(compiled_module_->export_wrappers(), isolate_);
@@ -2345,7 +2182,6 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
compiled_module_->GetNativeModule()->instance_id);
} else {
code_table = handle(compiled_module_->code_table(), isolate_);
- old_code_table = factory->CopyFixedArray(code_table);
TRACE("Reusing existing instance %d\n",
compiled_module_->instance_id());
}
@@ -2549,11 +2385,11 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
code_specialization.ApplyToWholeInstance(*instance, SKIP_ICACHE_FLUSH);
if (FLAG_wasm_jit_to_native) {
- FlushICache(isolate_, native_module);
+ FlushICache(native_module);
} else {
- FlushICache(isolate_, code_table);
+ FlushICache(code_table);
}
- FlushICache(isolate_, wrapper_table);
+ FlushICache(wrapper_table);
//--------------------------------------------------------------------------
// Unpack and notify signal handler of protected instructions.
@@ -2570,8 +2406,6 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// Insert the compiled module into the weak list of compiled modules.
//--------------------------------------------------------------------------
{
- Handle<Object> global_handle =
- isolate_->global_handles()->Create(*instance);
Handle<WeakCell> link_to_owning_instance = factory->NewWeakCell(instance);
if (!owner.is_null()) {
// Publish the new instance to the instances chain.
@@ -2580,9 +2414,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
}
module_object_->set_compiled_module(*compiled_module_);
compiled_module_->set_weak_owning_instance(*link_to_owning_instance);
- GlobalHandles::MakeWeak(global_handle.location(), global_handle.location(),
- instance_finalizer_callback_,
- v8::WeakCallbackType::kFinalizer);
+ WasmInstanceObject::InstallFinalizer(isolate_, instance);
}
//--------------------------------------------------------------------------
@@ -2607,41 +2439,20 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
}
//--------------------------------------------------------------------------
- // Execute the start function if one was specified.
+ // Create a wrapper for the start function.
//--------------------------------------------------------------------------
if (module_->start_function_index >= 0) {
- HandleScope scope(isolate_);
int start_index = module_->start_function_index;
- WasmCodeWrapper startup_code = EnsureExportedLazyDeoptData(
+ WasmCodeWrapper start_code = EnsureExportedLazyDeoptData(
isolate_, instance, code_table, native_module, start_index);
FunctionSig* sig = module_->functions[start_index].sig;
Handle<Code> wrapper_code = js_to_wasm_cache_.CloneOrCompileJSToWasmWrapper(
- isolate_, module_, startup_code, start_index,
+ isolate_, module_, start_code, start_index,
compiled_module_->use_trap_handler());
- Handle<WasmExportedFunction> startup_fct = WasmExportedFunction::New(
+ start_function_ = WasmExportedFunction::New(
isolate_, instance, MaybeHandle<String>(), start_index,
static_cast<int>(sig->parameter_count()), wrapper_code);
- RecordStats(startup_code, counters());
- // Call the JS function.
- Handle<Object> undefined = factory->undefined_value();
- // Close the modification scopes, so we can execute the start function.
- modification_scope.reset();
- native_module_modification_scope.reset();
- {
- // We're OK with JS execution here. The instance is fully setup.
- AllowJavascriptExecution allow_js(isolate_);
- MaybeHandle<Object> retval =
- Execution::Call(isolate_, startup_fct, undefined, 0, nullptr);
-
- if (retval.is_null()) {
- DCHECK(isolate_->has_pending_exception());
- // It's unfortunate that the new instance is already linked in the
- // chain. However, we need to set up everything before executing the
- // startup function, such that stack trace information can be generated
- // correctly already in the start function.
- return {};
- }
- }
+ RecordStats(start_code, counters());
}
DCHECK(!isolate_->has_pending_exception());
@@ -2655,6 +2466,22 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
return instance;
}
+bool InstanceBuilder::ExecuteStartFunction() {
+ if (start_function_.is_null()) return true; // No start function.
+
+ HandleScope scope(isolate_);
+ // Call the JS function.
+ Handle<Object> undefined = isolate_->factory()->undefined_value();
+ MaybeHandle<Object> retval =
+ Execution::Call(isolate_, start_function_, undefined, 0, nullptr);
+
+ if (retval.is_null()) {
+ DCHECK(isolate_->has_pending_exception());
+ return false;
+ }
+ return true;
+}
+
// Look up an import value in the {ffi_} object.
MaybeHandle<Object> InstanceBuilder::LookupImport(uint32_t index,
Handle<String> module_name,
@@ -2939,6 +2766,11 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
i += kFunctionTableEntrySize) {
table_instance.function_table->set(i, Smi::FromInt(kInvalidSigIndex));
}
+ WasmContext* wasm_context = nullptr;
+ if (WASM_CONTEXT_TABLES) {
+ wasm_context = instance->wasm_context()->get();
+ EnsureWasmContextTable(wasm_context, imported_cur_size);
+ }
// Initialize the dispatch table with the (foreign) JS functions
// that are already in the table.
for (int i = 0; i < imported_cur_size; ++i) {
@@ -2956,7 +2788,7 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
// id, then the signature does not appear at all in this module,
// so putting {-1} in the table will cause checks to always fail.
auto target = Handle<WasmExportedFunction>::cast(val);
- if (!FLAG_wasm_jit_to_native) {
+ if (!WASM_CONTEXT_TABLES) {
FunctionSig* sig = nullptr;
Handle<Code> code =
MakeWasmToWasmWrapper(isolate_, target, nullptr, &sig,
@@ -2968,34 +2800,17 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
table_instance.function_table->set(
compiler::FunctionTableCodeOffset(i), *code);
} else {
- const wasm::WasmCode* exported_code =
- target->GetWasmCode().GetWasmCode();
- wasm::NativeModule* exporting_module = exported_code->owner();
Handle<WasmInstanceObject> imported_instance =
handle(target->instance());
- imported_wasm_instances.Set(imported_instance, imported_instance);
+ const wasm::WasmCode* exported_code =
+ target->GetWasmCode().GetWasmCode();
FunctionSig* sig = imported_instance->module()
->functions[exported_code->index()]
.sig;
- wasm::WasmCode* wrapper_code =
- exporting_module->GetExportedWrapper(exported_code->index());
- if (wrapper_code == nullptr) {
- WasmContext* other_context =
- imported_instance->wasm_context()->get();
- Handle<Code> wrapper = compiler::CompileWasmToWasmWrapper(
- isolate_, target->GetWasmCode(), sig,
- reinterpret_cast<Address>(other_context));
- set_of_native_module_scopes.Add(exporting_module);
- wrapper_code = exporting_module->AddExportedWrapper(
- wrapper, exported_code->index());
- }
- int sig_index = module_->signature_map.Find(sig);
- Handle<Foreign> foreign_holder = isolate_->factory()->NewForeign(
- wrapper_code->instructions().start(), TENURED);
- table_instance.function_table->set(
- compiler::FunctionTableSigOffset(i), Smi::FromInt(sig_index));
- table_instance.function_table->set(
- compiler::FunctionTableCodeOffset(i), *foreign_holder);
+ auto& entry = wasm_context->table[i];
+ entry.context = imported_instance->wasm_context()->get();
+ entry.sig_id = module_->signature_map.Find(sig);
+ entry.target = exported_code->instructions().start();
}
}
@@ -3187,6 +3002,20 @@ void InstanceBuilder::ProcessExports(
// Fill the table to cache the exported JSFunction wrappers.
js_wrappers_.insert(js_wrappers_.begin(), module_->functions.size(),
Handle<JSFunction>::null());
+
+ // If an imported WebAssembly function gets exported, the exported function
+ // has to be identical to the imported function. Therefore we put all
+ // imported WebAssembly functions into the js_wrappers_ list.
+ for (int index = 0, end = static_cast<int>(module_->import_table.size());
+ index < end; ++index) {
+ WasmImport& import = module_->import_table[index];
+ if (import.kind == kExternalFunction) {
+ Handle<Object> value = sanitized_imports_[index].value;
+ if (WasmExportedFunction::IsWasmExportedFunction(*value)) {
+ js_wrappers_[import.index] = Handle<JSFunction>::cast(value);
+ }
+ }
+ }
}
Handle<JSObject> exports_object;
@@ -3345,12 +3174,6 @@ void InstanceBuilder::InitializeTables(
Handle<WasmInstanceObject> instance,
CodeSpecialization* code_specialization) {
size_t function_table_count = module_->function_tables.size();
- std::vector<GlobalHandleAddress> new_function_tables(function_table_count);
-
- wasm::NativeModule* native_module = compiled_module_->GetNativeModule();
- std::vector<GlobalHandleAddress> empty;
- std::vector<GlobalHandleAddress>& old_function_tables =
- FLAG_wasm_jit_to_native ? native_module->function_tables() : empty;
Handle<FixedArray> old_function_tables_gc =
FLAG_wasm_jit_to_native
@@ -3372,9 +3195,7 @@ void InstanceBuilder::InitializeTables(
instance->set_function_tables(*rooted_function_tables);
- if (FLAG_wasm_jit_to_native) {
- DCHECK_EQ(old_function_tables.size(), new_function_tables.size());
- } else {
+ if (!FLAG_wasm_jit_to_native) {
DCHECK_EQ(old_function_tables_gc->length(),
new_function_tables_gc->length());
}
@@ -3386,6 +3207,11 @@ void InstanceBuilder::InitializeTables(
int num_table_entries = static_cast<int>(table.initial_size);
int table_size = compiler::kFunctionTableEntrySize * num_table_entries;
+ if (WASM_CONTEXT_TABLES) {
+ WasmContext* wasm_context = instance->wasm_context()->get();
+ EnsureWasmContextTable(wasm_context, num_table_entries);
+ }
+
if (table_instance.function_table.is_null()) {
// Create a new dispatch table if necessary.
table_instance.function_table =
@@ -3427,24 +3253,18 @@ void InstanceBuilder::InitializeTables(
GlobalHandleAddress new_func_table_addr = global_func_table.address();
GlobalHandleAddress old_func_table_addr;
- if (!FLAG_wasm_jit_to_native) {
+ if (!WASM_CONTEXT_TABLES) {
WasmCompiledModule::SetTableValue(isolate_, new_function_tables_gc,
int_index, new_func_table_addr);
old_func_table_addr =
WasmCompiledModule::GetTableValue(*old_function_tables_gc, int_index);
- } else {
- new_function_tables[int_index] = new_func_table_addr;
-
- old_func_table_addr = old_function_tables[int_index];
+ code_specialization->RelocatePointer(old_func_table_addr,
+ new_func_table_addr);
}
- code_specialization->RelocatePointer(old_func_table_addr,
- new_func_table_addr);
}
- if (FLAG_wasm_jit_to_native) {
- native_module->function_tables() = new_function_tables;
- } else {
+ if (!WASM_CONTEXT_TABLES) {
compiled_module_->set_function_tables(*new_function_tables_gc);
}
}
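// Illustrative sketch (not part of the patch): with WASM_CONTEXT_TABLES, the
// indirect-call dispatch data lives in a flat array of
// IndirectFunctionTableEntry records owned by the WasmContext, instead of the
// FixedArray-based dispatch tables. Each entry carries the canonical signature
// id, the callee's WasmContext, and the callee's code entry point; an indirect
// call site roughly checks sig_id, passes context, and jumps to target.
// PopulateEntry is a hypothetical helper name; the field assignments mirror
// ProcessImports() and LoadTableSegments() above.
void PopulateEntry(WasmContext* wasm_context, int table_size, int index,
                   int sig_id, WasmContext* callee_context, Address target) {
  // Allocate the table on first use; fresh entries are initialized to
  // kInvalidSigIndex so signature checks fail for slots never filled in.
  EnsureWasmContextTable(wasm_context, table_size);
  DCHECK_LT(index, wasm_context->table_size);
  IndirectFunctionTableEntry& entry = wasm_context->table[index];
  entry.sig_id = sig_id;           // canonical signature id for the type check
  entry.context = callee_context;  // WasmContext of the instance owning target
  entry.target = target;           // start of the callee's machine code
}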
@@ -3499,10 +3319,12 @@ void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
uint32_t func_index = table_init.entries[i];
WasmFunction* function = &module_->functions[func_index];
int table_index = static_cast<int>(i + base);
- uint32_t sig_index = module_->signature_ids[function->sig_index];
+
+ // Update the local dispatch table first.
+ uint32_t sig_id = module_->signature_ids[function->sig_index];
table_instance.function_table->set(
compiler::FunctionTableSigOffset(table_index),
- Smi::FromInt(sig_index));
+ Smi::FromInt(sig_id));
WasmCodeWrapper wasm_code = EnsureTableExportLazyDeoptData(
isolate_, instance, code_table, native_module, func_index,
table_instance.function_table, table_index, &num_table_exports);
@@ -3517,7 +3339,17 @@ void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
table_instance.function_table->set(
compiler::FunctionTableCodeOffset(table_index),
*value_to_update_with);
+
+ if (WASM_CONTEXT_TABLES) {
+ WasmContext* wasm_context = instance->wasm_context()->get();
+ auto& entry = wasm_context->table[table_index];
+ entry.sig_id = sig_id;
+ entry.context = wasm_context;
+ entry.target = wasm_code.instructions().start();
+ }
+
if (!table_instance.table_object.is_null()) {
+ // Update the table object's other dispatch tables.
if (js_wrappers_[func_index].is_null()) {
// No JSFunction entry yet exists for this function. Create one.
// TODO(titzer): We compile JS->wasm wrappers for functions are
@@ -3546,31 +3378,10 @@ void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
}
table_instance.js_wrappers->set(table_index,
*js_wrappers_[func_index]);
- // When updating dispatch tables, we need to provide a wasm-to-wasm
- // wrapper for wasm_code - unless wasm_code is already a wrapper. If
- // it's a wasm-to-js wrapper, we don't need to construct a
- // wasm-to-wasm wrapper because there's no context switching required.
- // The remaining case is that it's a wasm-to-wasm wrapper, in which
- // case it's already doing "the right thing", and wrapping it again
- // would be redundant.
- if (func_index >= module_->num_imported_functions) {
- value_to_update_with = GetOrCreateIndirectCallWrapper(
- isolate_, instance, wasm_code, func_index, function->sig);
- } else {
- if (wasm_code.IsCodeObject()) {
- DCHECK(wasm_code.GetCode()->kind() == Code::WASM_TO_JS_FUNCTION ||
- wasm_code.GetCode()->kind() ==
- Code::WASM_TO_WASM_FUNCTION);
- } else {
- DCHECK(wasm_code.GetWasmCode()->kind() ==
- WasmCode::kWasmToJsWrapper ||
- wasm_code.GetWasmCode()->kind() ==
- WasmCode::kWasmToWasmWrapper);
- }
- }
- WasmTableObject::UpdateDispatchTables(table_instance.table_object,
- table_index, function->sig,
- value_to_update_with);
+ // UpdateDispatchTables() should update this instance as well.
+ WasmTableObject::UpdateDispatchTables(
+ isolate_, table_instance.table_object, table_index, function->sig,
+ instance, wasm_code, func_index);
}
}
}
@@ -3686,14 +3497,18 @@ void AsyncCompileJob::AsyncCompileFailed(ErrorThrower& thrower) {
// {job} keeps the {this} pointer alive.
std::shared_ptr<AsyncCompileJob> job =
isolate_->wasm_engine()->compilation_manager()->RemoveJob(this);
- RejectPromise(isolate_, context_, thrower, module_promise_);
+ MaybeHandle<Object> promise_result =
+ JSPromise::Reject(module_promise_, thrower.Reify());
+ CHECK_EQ(promise_result.is_null(), isolate_->has_pending_exception());
}
void AsyncCompileJob::AsyncCompileSucceeded(Handle<Object> result) {
// {job} keeps the {this} pointer alive.
std::shared_ptr<AsyncCompileJob> job =
isolate_->wasm_engine()->compilation_manager()->RemoveJob(this);
- ResolvePromise(isolate_, context_, module_promise_, result);
+ MaybeHandle<Object> promise_result =
+ JSPromise::Resolve(module_promise_, result);
+ CHECK_EQ(promise_result.is_null(), isolate_->has_pending_exception());
}
// A closure to run a compilation step (either as foreground or background
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index 3a8b1972d6..b41ca28cea 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -23,34 +23,20 @@ namespace wasm {
class ModuleCompiler;
class WasmCode;
-V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> SyncCompileTranslatedAsmJs(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
- Handle<Script> asm_js_script, Vector<const byte> asm_js_offset_table_bytes);
+MaybeHandle<WasmModuleObject> CompileToModuleObject(
+ Isolate* isolate, ErrorThrower* thrower, std::unique_ptr<WasmModule> module,
+ const ModuleWireBytes& wire_bytes, Handle<Script> asm_js_script,
+ Vector<const byte> asm_js_offset_table_bytes);
-V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> SyncCompile(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes);
-
-V8_EXPORT_PRIVATE MaybeHandle<WasmInstanceObject> SyncInstantiate(
+MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
Isolate* isolate, ErrorThrower* thrower,
Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
MaybeHandle<JSArrayBuffer> memory);
-V8_EXPORT_PRIVATE MaybeHandle<WasmInstanceObject> SyncCompileAndInstantiate(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
- MaybeHandle<JSReceiver> imports, MaybeHandle<JSArrayBuffer> memory);
-
-V8_EXPORT_PRIVATE void AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
- const ModuleWireBytes& bytes,
- bool is_shared);
-
-V8_EXPORT_PRIVATE void AsyncInstantiate(Isolate* isolate,
- Handle<JSPromise> promise,
- Handle<WasmModuleObject> module_object,
- MaybeHandle<JSReceiver> imports);
-
-V8_EXPORT_PRIVATE void CompileJsToWasmWrappers(
- Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
- Counters* counters);
+V8_EXPORT_PRIVATE
+void CompileJsToWasmWrappers(Isolate* isolate,
+ Handle<WasmCompiledModule> compiled_module,
+ Counters* counters);
V8_EXPORT_PRIVATE Handle<Script> CreateWasmScript(
Isolate* isolate, const ModuleWireBytes& wire_bytes);
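// Illustrative sketch (assumed usage, not part of the patch): the exported
// Sync*/Async* entry points are gone from this header, so callers inside the
// wasm directory decode a module themselves and then go through the two
// remaining helpers. CompileAndInstantiate is a hypothetical wrapper name;
// the decoded module and wire bytes are assumed to come from the module
// decoder, and errors stay on the ErrorThrower as before.
MaybeHandle<WasmInstanceObject> CompileAndInstantiate(
    Isolate* isolate, ErrorThrower* thrower, std::unique_ptr<WasmModule> module,
    const ModuleWireBytes& wire_bytes, MaybeHandle<JSReceiver> imports) {
  // Plain wasm: no asm.js script and no asm.js offset table.
  MaybeHandle<WasmModuleObject> module_object = CompileToModuleObject(
      isolate, thrower, std::move(module), wire_bytes, Handle<Script>(),
      Vector<const byte>());
  if (module_object.is_null()) return {};
  // Instantiation also runs the start function via ExecuteStartFunction().
  return InstantiateToInstanceObject(isolate, thrower,
                                     module_object.ToHandleChecked(), imports,
                                     MaybeHandle<JSArrayBuffer>());
}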
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 010f191263..109b2fc230 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -270,7 +270,7 @@ class ModuleDecoderImpl : public Decoder {
pc_ = end_; // On error, terminate section decoding loop.
}
- void DumpModule(const ModuleResult& result) {
+ void DumpModule(const Vector<const byte> module_bytes) {
std::string path;
if (FLAG_dump_wasm_module_path) {
path = FLAG_dump_wasm_module_path;
@@ -280,12 +280,13 @@ class ModuleDecoderImpl : public Decoder {
}
}
// Files are named `HASH.{ok,failed}.wasm`.
- size_t hash = base::hash_range(start_, end_);
+ size_t hash = base::hash_range(module_bytes.start(), module_bytes.end());
EmbeddedVector<char, 32> buf;
- SNPrintF(buf, "%016zx.%s.wasm", hash, result.ok() ? "ok" : "failed");
+ SNPrintF(buf, "%016zx.%s.wasm", hash, ok() ? "ok" : "failed");
std::string name(buf.start());
if (FILE* wasm_file = base::OS::FOpen((path + name).c_str(), "wb")) {
- if (fwrite(start_, end_ - start_, 1, wasm_file) != 1) {
+ if (fwrite(module_bytes.start(), module_bytes.length(), 1, wasm_file) !=
+ 1) {
OFStream os(stderr);
os << "Error while dumping wasm file" << std::endl;
}
@@ -848,7 +849,6 @@ class ModuleDecoderImpl : public Decoder {
// Copy error code and location.
result.MoveErrorFrom(intermediate_result_);
}
- if (FLAG_dump_wasm_module) DumpModule(result);
return result;
}
@@ -856,6 +856,7 @@ class ModuleDecoderImpl : public Decoder {
ModuleResult DecodeModule(Isolate* isolate, bool verify_functions = true) {
StartDecoding(isolate);
uint32_t offset = 0;
+ Vector<const byte> orig_bytes(start(), end() - start());
DecodeModuleHeader(Vector<const uint8_t>(start(), end() - start()), offset);
if (failed()) {
return FinishDecoding(verify_functions);
@@ -878,6 +879,8 @@ class ModuleDecoderImpl : public Decoder {
section_iter.advance(true);
}
+ if (FLAG_dump_wasm_module) DumpModule(orig_bytes);
+
if (decoder.failed()) {
return decoder.toResult<std::unique_ptr<WasmModule>>(nullptr);
}
diff --git a/deps/v8/src/wasm/wasm-api.cc b/deps/v8/src/wasm/wasm-api.cc
deleted file mode 100644
index 4c51dc54cd..0000000000
--- a/deps/v8/src/wasm/wasm-api.cc
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/wasm/wasm-api.h"
-
-#include "src/isolate-inl.h"
-#include "src/isolate.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-ScheduledErrorThrower::~ScheduledErrorThrower() {
- // There should never be both a pending and a scheduled exception.
- DCHECK(!isolate()->has_scheduled_exception() ||
- !isolate()->has_pending_exception());
- // Don't throw another error if there is already a scheduled error.
- if (isolate()->has_scheduled_exception()) {
- Reset();
- } else if (isolate()->has_pending_exception()) {
- Reset();
- isolate()->OptionalRescheduleException(false);
- } else if (error()) {
- isolate()->ScheduleThrow(*Reify());
- }
-}
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-api.h b/deps/v8/src/wasm/wasm-api.h
deleted file mode 100644
index 464cdfa6f1..0000000000
--- a/deps/v8/src/wasm/wasm-api.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_API_H_
-#define V8_WASM_API_H_
-
-#include "src/wasm/wasm-result.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-// Like an ErrorThrower, but turns all pending exceptions into scheduled
-// exceptions when going out of scope. Use this in API methods.
-// Note that pending exceptions are not necessarily created by the ErrorThrower,
-// but e.g. by the wasm start function. There might also be a scheduled
-// exception, created by another API call (e.g. v8::Object::Get). But there
-// should never be both pending and scheduled exceptions.
-class V8_EXPORT_PRIVATE ScheduledErrorThrower : public ErrorThrower {
- public:
- ScheduledErrorThrower(v8::Isolate* isolate, const char* context)
- : ScheduledErrorThrower(reinterpret_cast<Isolate*>(isolate), context) {}
-
- ScheduledErrorThrower(Isolate* isolate, const char* context)
- : ErrorThrower(isolate, context) {}
-
- ~ScheduledErrorThrower();
-};
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_API_H_
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index 2b8f309733..25f61d2e12 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -30,7 +30,6 @@ namespace internal {
namespace wasm {
namespace {
-size_t native_module_ids = 0;
#if V8_TARGET_ARCH_X64
#define __ masm->
@@ -71,10 +70,11 @@ void PatchTrampolineAndStubCalls(
#else
Address new_target = old_target;
#endif
- it.rinfo()->set_target_address(nullptr, new_target, SKIP_WRITE_BARRIER,
+ it.rinfo()->set_target_address(new_target, SKIP_WRITE_BARRIER,
SKIP_ICACHE_FLUSH);
}
}
+
} // namespace
DisjointAllocationPool::DisjointAllocationPool(Address start, Address end) {
@@ -212,18 +212,21 @@ void WasmCode::Disassemble(const char* name, Isolate* isolate,
instructions().start() + instruction_size, nullptr);
os << "\n";
- Object* source_positions_or_undef =
- owner_->compiled_module()->source_positions()->get(index());
- if (!source_positions_or_undef->IsUndefined(isolate)) {
- os << "Source positions:\n pc offset position\n";
- for (SourcePositionTableIterator it(
- ByteArray::cast(source_positions_or_undef));
- !it.done(); it.Advance()) {
- os << std::setw(10) << std::hex << it.code_offset() << std::dec
- << std::setw(10) << it.source_position().ScriptOffset()
- << (it.is_statement() ? " statement" : "") << "\n";
+ // Anonymous functions don't have source positions.
+ if (!IsAnonymous()) {
+ Object* source_positions_or_undef =
+ owner_->compiled_module()->source_positions()->get(index());
+ if (!source_positions_or_undef->IsUndefined(isolate)) {
+ os << "Source positions:\n pc offset position\n";
+ for (SourcePositionTableIterator it(
+ ByteArray::cast(source_positions_or_undef));
+ !it.done(); it.Advance()) {
+ os << std::setw(10) << std::hex << it.code_offset() << std::dec
+ << std::setw(10) << it.source_position().ScriptOffset()
+ << (it.is_statement() ? " statement" : "") << "\n";
+ }
+ os << "\n";
}
- os << "\n";
}
os << "RelocInfo (size = " << reloc_size_ << ")\n";
@@ -268,10 +271,12 @@ WasmCode::~WasmCode() {
}
}
+base::AtomicNumber<size_t> NativeModule::next_id_;
+
NativeModule::NativeModule(uint32_t num_functions, uint32_t num_imports,
bool can_request_more, VirtualMemory* mem,
WasmCodeManager* code_manager)
- : instance_id(native_module_ids++),
+ : instance_id(next_id_.Increment(1)),
code_table_(num_functions),
num_imported_functions_(num_imports),
free_memory_(reinterpret_cast<Address>(mem->address()),
@@ -296,11 +301,6 @@ void NativeModule::ResizeCodeTableForTest(size_t last_index) {
source_positions = isolate->factory()->CopyFixedArrayAndGrow(
source_positions, grow_by, TENURED);
compiled_module()->set_source_positions(*source_positions);
- Handle<FixedArray> handler_table(compiled_module()->handler_table(),
- isolate);
- handler_table = isolate->factory()->CopyFixedArrayAndGrow(handler_table,
- grow_by, TENURED);
- compiled_module()->set_handler_table(*handler_table);
}
}
@@ -318,19 +318,24 @@ WasmCode* NativeModule::AddOwnedCode(
std::unique_ptr<const byte[]> reloc_info, size_t reloc_size,
Maybe<uint32_t> index, WasmCode::Kind kind, size_t constant_pool_offset,
uint32_t stack_slots, size_t safepoint_table_offset,
+ size_t handler_table_offset,
std::shared_ptr<ProtectedInstructions> protected_instructions,
bool is_liftoff) {
// both allocation and insertion in owned_code_ happen in the same critical
// section, thus ensuring owned_code_'s elements are rarely if ever moved.
base::LockGuard<base::Mutex> lock(&allocation_mutex_);
Address executable_buffer = AllocateForCode(orig_instructions.size());
- if (executable_buffer == nullptr) return nullptr;
+ if (executable_buffer == nullptr) {
+ V8::FatalProcessOutOfMemory("NativeModule::AddOwnedCode");
+ UNREACHABLE();
+ }
memcpy(executable_buffer, orig_instructions.start(),
orig_instructions.size());
std::unique_ptr<WasmCode> code(new WasmCode(
{executable_buffer, orig_instructions.size()}, std::move(reloc_info),
reloc_size, this, index, kind, constant_pool_offset, stack_slots,
- safepoint_table_offset, std::move(protected_instructions), is_liftoff));
+ safepoint_table_offset, handler_table_offset,
+ std::move(protected_instructions), is_liftoff));
WasmCode* ret = code.get();
// TODO(mtrofin): We allocate in increasing address order, and
@@ -339,8 +344,8 @@ WasmCode* NativeModule::AddOwnedCode(
auto insert_before = std::upper_bound(owned_code_.begin(), owned_code_.end(),
code, owned_code_comparer_);
owned_code_.insert(insert_before, std::move(code));
- wasm_code_manager_->FlushICache(ret->instructions().start(),
- ret->instructions().size());
+ Assembler::FlushICache(ret->instructions().start(),
+ ret->instructions().size());
return ret;
}
@@ -348,12 +353,10 @@ WasmCode* NativeModule::AddOwnedCode(
WasmCode* NativeModule::AddCodeCopy(Handle<Code> code, WasmCode::Kind kind,
uint32_t index) {
WasmCode* ret = AddAnonymousCode(code, kind);
- SetCodeTable(index, ret);
+ code_table_[index] = ret;
ret->index_ = Just(index);
compiled_module()->source_positions()->set(static_cast<int>(index),
code->source_position_table());
- compiled_module()->handler_table()->set(static_cast<int>(index),
- code->handler_table());
return ret;
}
@@ -364,15 +367,11 @@ WasmCode* NativeModule::AddInterpreterWrapper(Handle<Code> code,
return ret;
}
-WasmCode* NativeModule::SetLazyBuiltin(Handle<Code> code) {
- DCHECK_NULL(lazy_builtin_);
- lazy_builtin_ = AddAnonymousCode(code, WasmCode::kLazyStub);
-
+void NativeModule::SetLazyBuiltin(Handle<Code> code) {
+ WasmCode* lazy_builtin = AddAnonymousCode(code, WasmCode::kLazyStub);
for (uint32_t i = num_imported_functions(), e = FunctionCount(); i < e; ++i) {
- SetCodeTable(i, lazy_builtin_);
+ code_table_[i] = lazy_builtin;
}
-
- return lazy_builtin_;
}
WasmCompiledModule* NativeModule::compiled_module() const {
@@ -392,13 +391,16 @@ WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code,
reloc_info.reset(new byte[code->relocation_size()]);
memcpy(reloc_info.get(), code->relocation_start(), code->relocation_size());
}
+ std::shared_ptr<ProtectedInstructions> protected_instructions(
+ new ProtectedInstructions(0));
WasmCode* ret = AddOwnedCode(
{code->instruction_start(),
static_cast<size_t>(code->instruction_size())},
std::move(reloc_info), static_cast<size_t>(code->relocation_size()),
Nothing<uint32_t>(), kind, code->constant_pool_offset(),
(code->has_safepoint_info() ? code->stack_slots() : 0),
- (code->has_safepoint_info() ? code->safepoint_table_offset() : 0), {});
+ (code->has_safepoint_info() ? code->safepoint_table_offset() : 0),
+ code->handler_table_offset(), protected_instructions, false);
if (ret == nullptr) return nullptr;
intptr_t delta = ret->instructions().start() - code->instruction_start();
int mask = RelocInfo::kApplyMask | RelocInfo::kCodeTargetMask |
@@ -411,8 +413,7 @@ WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code,
if (RelocInfo::IsCodeTarget(it.rinfo()->rmode())) {
Code* call_target =
Code::GetCodeFromTargetAddress(orig_it.rinfo()->target_address());
- it.rinfo()->set_target_address(nullptr,
- GetLocalAddressFor(handle(call_target)),
+ it.rinfo()->set_target_address(GetLocalAddressFor(handle(call_target)),
SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else {
if (RelocInfo::IsEmbeddedObject(it.rinfo()->rmode())) {
@@ -427,7 +428,7 @@ WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code,
WasmCode* NativeModule::AddCode(
const CodeDesc& desc, uint32_t frame_slots, uint32_t index,
- size_t safepoint_table_offset,
+ size_t safepoint_table_offset, size_t handler_table_offset,
std::unique_ptr<ProtectedInstructions> protected_instructions,
bool is_liftoff) {
std::unique_ptr<byte[]> reloc_info;
@@ -441,11 +442,11 @@ WasmCode* NativeModule::AddCode(
{desc.buffer, static_cast<size_t>(desc.instr_size)},
std::move(reloc_info), static_cast<size_t>(desc.reloc_size), Just(index),
WasmCode::kFunction, desc.instr_size - desc.constant_pool_size,
- frame_slots, safepoint_table_offset, std::move(protected_instructions),
- is_liftoff);
+ frame_slots, safepoint_table_offset, handler_table_offset,
+ std::move(protected_instructions), is_liftoff);
if (ret == nullptr) return nullptr;
- SetCodeTable(index, ret);
+ code_table_[index] = ret;
// TODO(mtrofin): this is a copy and paste from Code::CopyFrom.
int mode_mask = RelocInfo::kCodeTargetMask |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
@@ -467,12 +468,12 @@ WasmCode* NativeModule::AddCode(
// code object
Handle<Object> p = it.rinfo()->target_object_handle(origin);
Code* code = Code::cast(*p);
- it.rinfo()->set_target_address(nullptr, GetLocalAddressFor(handle(code)),
+ it.rinfo()->set_target_address(GetLocalAddressFor(handle(code)),
SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
Address p = it.rinfo()->target_runtime_entry(origin);
- it.rinfo()->set_target_runtime_entry(
- origin->isolate(), p, SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ it.rinfo()->set_target_runtime_entry(p, SKIP_WRITE_BARRIER,
+ SKIP_ICACHE_FLUSH);
} else {
intptr_t delta = ret->instructions().start() - desc.buffer;
it.rinfo()->apply(delta);
@@ -490,8 +491,7 @@ Address NativeModule::CreateTrampolineTo(Handle<Code> code) {
masm.GetCode(nullptr, &code_desc);
WasmCode* wasm_code = AddOwnedCode(
{code_desc.buffer, static_cast<size_t>(code_desc.instr_size)}, nullptr, 0,
- Nothing<uint32_t>(), WasmCode::kTrampoline, 0, 0, 0, {});
- if (wasm_code == nullptr) return nullptr;
+ Nothing<uint32_t>(), WasmCode::kTrampoline, 0, 0, 0, 0, {}, false);
Address ret = wasm_code->instructions().start();
trampolines_.emplace(std::make_pair(dest, ret));
return ret;
@@ -560,7 +560,7 @@ void NativeModule::Link(uint32_t index) {
if (target == nullptr) continue;
Address target_addr = target->instructions().start();
DCHECK_NOT_NULL(target);
- it.rinfo()->set_wasm_call_address(nullptr, target_addr,
+ it.rinfo()->set_wasm_call_address(target_addr,
ICacheFlushMode::SKIP_ICACHE_FLUSH);
}
}
@@ -655,29 +655,29 @@ WasmCode* NativeModule::Lookup(Address pc) {
return nullptr;
}
-WasmCode* NativeModule::CloneLazyBuiltinInto(uint32_t index) {
- DCHECK_NOT_NULL(lazy_builtin());
- WasmCode* ret = CloneCode(lazy_builtin());
- SetCodeTable(index, ret);
+WasmCode* NativeModule::CloneLazyBuiltinInto(const WasmCode* code,
+ uint32_t index) {
+ DCHECK_EQ(wasm::WasmCode::kLazyStub, code->kind());
+ WasmCode* ret = CloneCode(code);
+ code_table_[index] = ret;
ret->index_ = Just(index);
return ret;
}
-bool NativeModule::CloneTrampolinesAndStubs(const NativeModule* other) {
+void NativeModule::CloneTrampolinesAndStubs(const NativeModule* other) {
for (auto& pair : other->trampolines_) {
Address key = pair.first;
Address local =
GetLocalAddressFor(handle(Code::GetCodeFromTargetAddress(key)));
- if (local == nullptr) return false;
+ DCHECK_NOT_NULL(local);
trampolines_.emplace(std::make_pair(key, local));
}
for (auto& pair : other->stubs_) {
uint32_t key = pair.first;
WasmCode* clone = CloneCode(pair.second);
- if (!clone) return false;
+ DCHECK_NOT_NULL(clone);
stubs_.emplace(std::make_pair(key, clone));
}
- return true;
}
WasmCode* NativeModule::CloneCode(const WasmCode* original_code) {
@@ -692,10 +692,10 @@ WasmCode* NativeModule::CloneCode(const WasmCode* original_code) {
original_code->reloc_info().size(), original_code->index_,
original_code->kind(), original_code->constant_pool_offset_,
original_code->stack_slots(), original_code->safepoint_table_offset_,
- original_code->protected_instructions_);
- if (ret == nullptr) return nullptr;
+ original_code->handler_table_offset_,
+ original_code->protected_instructions_, original_code->is_liftoff());
if (!ret->IsAnonymous()) {
- SetCodeTable(ret->index(), ret);
+ code_table_[ret->index()] = ret;
}
intptr_t delta =
ret->instructions().start() - original_code->instructions().start();
@@ -707,10 +707,6 @@ WasmCode* NativeModule::CloneCode(const WasmCode* original_code) {
return ret;
}
-void NativeModule::SetCodeTable(uint32_t index, wasm::WasmCode* code) {
- code_table_[index] = code;
-}
-
NativeModule::~NativeModule() {
TRACE_HEAP("Deleting native module: %p\n", reinterpret_cast<void*>(this));
wasm_code_manager_->FreeNativeModuleMemories(this);
@@ -889,11 +885,7 @@ std::unique_ptr<NativeModule> NativeModule::Clone() {
TRACE_HEAP("%zu cloned from %zu\n", ret->instance_id, instance_id);
if (!ret) return ret;
- if (lazy_builtin() != nullptr) {
- ret->lazy_builtin_ = ret->CloneCode(lazy_builtin());
- }
-
- if (!ret->CloneTrampolinesAndStubs(this)) return nullptr;
+ ret->CloneTrampolinesAndStubs(this);
std::unordered_map<Address, Address, AddressHasher> reverse_lookup;
for (auto& pair : trampolines_) {
@@ -917,20 +909,29 @@ std::unique_ptr<NativeModule> NativeModule::Clone() {
WasmCode* old_stub = stubs_.find(pair.first)->second;
PatchTrampolineAndStubCalls(old_stub, new_stub, reverse_lookup);
}
- if (lazy_builtin_ != nullptr) {
- PatchTrampolineAndStubCalls(lazy_builtin_, ret->lazy_builtin_,
- reverse_lookup);
- }
+ WasmCode* anonymous_lazy_builtin = nullptr;
for (uint32_t i = num_imported_functions(), e = FunctionCount(); i < e; ++i) {
const WasmCode* original_code = GetCode(i);
switch (original_code->kind()) {
case WasmCode::kLazyStub: {
- if (original_code->IsAnonymous()) {
- ret->SetCodeTable(i, ret->lazy_builtin());
- } else {
- if (!ret->CloneLazyBuiltinInto(i)) return nullptr;
+ // Use the first anonymous lazy compile stub hit in this loop as the
+ // canonical copy for all further ones by remembering it locally via
+ // the {anonymous_lazy_builtin} variable. All non-anonymous such stubs
+ // are just cloned directly via {CloneLazyBuiltinInto} below.
+ if (!original_code->IsAnonymous()) {
+ WasmCode* new_code = ret->CloneLazyBuiltinInto(original_code, i);
+ if (new_code == nullptr) return nullptr;
+ PatchTrampolineAndStubCalls(original_code, new_code, reverse_lookup);
+ break;
+ }
+ if (anonymous_lazy_builtin == nullptr) {
+ WasmCode* new_code = ret->CloneCode(original_code);
+ if (new_code == nullptr) return nullptr;
+ PatchTrampolineAndStubCalls(original_code, new_code, reverse_lookup);
+ anonymous_lazy_builtin = new_code;
}
+ ret->code_table_[i] = anonymous_lazy_builtin;
} break;
case WasmCode::kFunction: {
WasmCode* new_code = ret->CloneCode(original_code);
@@ -941,7 +942,6 @@ std::unique_ptr<NativeModule> NativeModule::Clone() {
UNREACHABLE();
}
}
- ret->specialization_data_ = specialization_data_;
return ret;
}
@@ -1009,22 +1009,17 @@ intptr_t WasmCodeManager::remaining_uncommitted() const {
return remaining_uncommitted_.Value();
}
-void WasmCodeManager::FlushICache(Address start, size_t size) {
- Assembler::FlushICache(reinterpret_cast<internal::Isolate*>(isolate_), start,
- size);
-}
-
NativeModuleModificationScope::NativeModuleModificationScope(
NativeModule* native_module)
: native_module_(native_module) {
- if (native_module_) {
+ if (native_module_ && (native_module_->modification_scope_depth_++) == 0) {
bool success = native_module_->SetExecutable(false);
CHECK(success);
}
}
NativeModuleModificationScope::~NativeModuleModificationScope() {
- if (native_module_) {
+ if (native_module_ && (native_module_->modification_scope_depth_--) == 1) {
bool success = native_module_->SetExecutable(true);
CHECK(success);
}
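
The constructor/destructor pair above now keeps a depth counter so that nested NativeModuleModificationScope objects flip the page permissions only once, at the outermost level. Below is a minimal standalone sketch of that reentrant RAII pattern; Region and ModificationScope are illustrative stand-ins, not the V8 classes.

#include <cassert>

struct Region {
  int modification_scope_depth = 0;
  bool executable = true;
  void SetExecutable(bool value) { executable = value; }
};

class ModificationScope {
 public:
  explicit ModificationScope(Region* region) : region_(region) {
    // Only the outermost scope makes the region writable.
    if (region_ && region_->modification_scope_depth++ == 0) {
      region_->SetExecutable(false);
    }
  }
  ~ModificationScope() {
    // Only the outermost scope restores executability.
    if (region_ && --region_->modification_scope_depth == 0) {
      region_->SetExecutable(true);
    }
  }

 private:
  Region* region_;
};

int main() {
  Region region;
  {
    ModificationScope outer(&region);
    ModificationScope inner(&region);  // nesting does not toggle twice
    assert(!region.executable);
  }
  assert(region.executable);  // restored when the outermost scope exits
  return 0;
}
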
@@ -1039,8 +1034,8 @@ void SetWasmCalleeTag(RelocInfo* rinfo, uint32_t tag) {
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
*(reinterpret_cast<uint32_t*>(rinfo->target_address_address())) = tag;
#else
- rinfo->set_target_address(nullptr, reinterpret_cast<Address>(tag),
- SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ rinfo->set_target_address(reinterpret_cast<Address>(tag), SKIP_WRITE_BARRIER,
+ SKIP_ICACHE_FLUSH);
#endif
}
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index 3e2a0918fb..e398f1bcfd 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_HEAP_H_
-#define V8_WASM_HEAP_H_
+#ifndef V8_WASM_WASM_CODE_MANAGER_H_
+#define V8_WASM_WASM_CODE_MANAGER_H_
#include <functional>
#include <list>
@@ -111,6 +111,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
Address constant_pool() const;
size_t constant_pool_offset() const { return constant_pool_offset_; }
size_t safepoint_table_offset() const { return safepoint_table_offset_; }
+ size_t handler_table_offset() const { return handler_table_offset_; }
uint32_t stack_slots() const { return stack_slots_; }
bool is_liftoff() const { return is_liftoff_; }
@@ -120,6 +121,9 @@ class V8_EXPORT_PRIVATE WasmCode final {
void ResetTrapHandlerIndex();
const ProtectedInstructions& protected_instructions() const {
+    // TODO(mstarzinger): Code that doesn't have trapping instructions should
+    // not be required to have this vector; make it possible to be null.
+ DCHECK_NOT_NULL(protected_instructions_);
return *protected_instructions_.get();
}
@@ -139,9 +143,9 @@ class V8_EXPORT_PRIVATE WasmCode final {
std::unique_ptr<const byte[]>&& reloc_info, size_t reloc_size,
NativeModule* owner, Maybe<uint32_t> index, Kind kind,
size_t constant_pool_offset, uint32_t stack_slots,
- size_t safepoint_table_offset,
+ size_t safepoint_table_offset, size_t handler_table_offset,
std::shared_ptr<ProtectedInstructions> protected_instructions,
- bool is_liftoff = false)
+ bool is_liftoff)
: instructions_(instructions),
reloc_info_(std::move(reloc_info)),
reloc_size_(reloc_size),
@@ -151,6 +155,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
constant_pool_offset_(constant_pool_offset),
stack_slots_(stack_slots),
safepoint_table_offset_(safepoint_table_offset),
+ handler_table_offset_(handler_table_offset),
protected_instructions_(std::move(protected_instructions)),
is_liftoff_(is_liftoff) {}
@@ -169,6 +174,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
// since there may be stack/register tagged values for large number
// conversions.
size_t safepoint_table_offset_ = 0;
+ size_t handler_table_offset_ = 0;
intptr_t trap_handler_index_ = -1;
std::shared_ptr<ProtectedInstructions> protected_instructions_;
bool is_liftoff_;
@@ -189,9 +195,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
std::unique_ptr<NativeModule> Clone();
WasmCode* AddCode(const CodeDesc& desc, uint32_t frame_count, uint32_t index,
- size_t safepoint_table_offset,
- std::unique_ptr<ProtectedInstructions>,
- bool is_liftoff = false);
+ size_t safepoint_table_offset, size_t handler_table_offset,
+ std::unique_ptr<ProtectedInstructions>, bool is_liftoff);
// A way to copy over JS-allocated code. This is because we compile
// certain wrappers using a different pipeline.
@@ -204,11 +209,11 @@ class V8_EXPORT_PRIVATE NativeModule final {
WasmCode* AddInterpreterWrapper(Handle<Code> code, uint32_t index);
// When starting lazy compilation, provide the WasmLazyCompile builtin by
- // calling SetLazyBuiltin. It will initialize the code table with it, and the
- // lazy_builtin_ field. The latter is used when creating entries for exported
+  // calling SetLazyBuiltin. It will initialize the code table with it. Copies
+  // of it might be cloned later when creating entries for exported
// functions and indirect callable functions, so that they may be identified
// by the runtime.
- WasmCode* SetLazyBuiltin(Handle<Code> code);
+ void SetLazyBuiltin(Handle<Code> code);
// ExportedWrappers are WasmToWasmWrappers for functions placed on import
// tables. We construct them as-needed.
@@ -219,8 +224,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
uint32_t FunctionCount() const;
WasmCode* GetCode(uint32_t index) const;
- WasmCode* lazy_builtin() const { return lazy_builtin_; }
-
// We special-case lazy cloning because we currently rely on making copies
// of the lazy builtin, to be able to identify, in the runtime, which function
// the lazy builtin is a placeholder of. If we used trampolines, we would call
@@ -229,7 +232,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
// builtin. The logic for seeking though frames would change, though.
// TODO(mtrofin): perhaps we can do exactly that - either before or after
// this change.
- WasmCode* CloneLazyBuiltinInto(uint32_t);
+ WasmCode* CloneLazyBuiltinInto(const WasmCode* code, uint32_t);
bool SetExecutable(bool executable);
@@ -239,24 +242,12 @@ class V8_EXPORT_PRIVATE NativeModule final {
void LinkAll();
void Link(uint32_t index);
- // TODO(mtrofin): needed until we sort out exception handlers and
- // source positions, which are still on the GC-heap.
+ // TODO(mstarzinger): needed until we sort out source positions, which are
+ // still on the GC-heap.
WasmCompiledModule* compiled_module() const;
void SetCompiledModule(Handle<WasmCompiledModule>);
- // Shorthand accessors to the specialization data content.
- std::vector<wasm::GlobalHandleAddress>& function_tables() {
- return specialization_data_.function_tables;
- }
-
- std::vector<wasm::GlobalHandleAddress>& empty_function_tables() {
- return specialization_data_.empty_function_tables;
- }
-
uint32_t num_imported_functions() const { return num_imported_functions_; }
- size_t num_function_tables() const {
- return specialization_data_.empty_function_tables.size();
- }
size_t committed_memory() const { return committed_memory_; }
const size_t instance_id = 0;
@@ -266,6 +257,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
friend class WasmCodeManager;
friend class NativeModuleSerializer;
friend class NativeModuleDeserializer;
+ friend class NativeModuleModificationScope;
struct WasmCodeUniquePtrComparer {
bool operator()(const std::unique_ptr<WasmCode>& a,
@@ -276,7 +268,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
}
};
- static base::AtomicNumber<uint32_t> next_id_;
+ static base::AtomicNumber<size_t> next_id_;
NativeModule(const NativeModule&) = delete;
NativeModule& operator=(const NativeModule&) = delete;
NativeModule(uint32_t num_functions, uint32_t num_imports,
@@ -295,11 +287,11 @@ class V8_EXPORT_PRIVATE NativeModule final {
size_t reloc_size, Maybe<uint32_t> index,
WasmCode::Kind kind, size_t constant_pool_offset,
uint32_t stack_slots, size_t safepoint_table_offset,
+ size_t handler_table_offset,
std::shared_ptr<ProtectedInstructions>,
- bool is_liftoff = false);
- void SetCodeTable(uint32_t, wasm::WasmCode*);
+ bool is_liftoff);
WasmCode* CloneCode(const WasmCode*);
- bool CloneTrampolinesAndStubs(const NativeModule* other);
+ void CloneTrampolinesAndStubs(const NativeModule* other);
WasmCode* Lookup(Address);
Address GetLocalAddressFor(Handle<Code>);
Address CreateTrampolineTo(Handle<Code>);
@@ -319,20 +311,12 @@ class V8_EXPORT_PRIVATE NativeModule final {
DisjointAllocationPool allocated_memory_;
std::list<VirtualMemory> owned_memory_;
WasmCodeManager* wasm_code_manager_;
- wasm::WasmCode* lazy_builtin_ = nullptr;
base::Mutex allocation_mutex_;
Handle<WasmCompiledModule> compiled_module_;
size_t committed_memory_ = 0;
bool can_request_more_memory_;
bool is_executable_ = false;
-
- // Specialization data that needs to be serialized and cloned.
- // Keeping it groupped together because it makes cloning of all these
- // elements a 1 line copy.
- struct {
- std::vector<wasm::GlobalHandleAddress> function_tables;
- std::vector<wasm::GlobalHandleAddress> empty_function_tables;
- } specialization_data_;
+ int modification_scope_depth_ = 0;
};
class V8_EXPORT_PRIVATE WasmCodeManager final {
@@ -356,10 +340,6 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
WasmCode* GetCodeFromStartAddress(Address pc) const;
intptr_t remaining_uncommitted() const;
- // TODO(mtrofin): replace this API with an alternative that is Isolate-
- // independent.
- void FlushICache(Address start, size_t size);
-
private:
friend class NativeModule;
@@ -416,4 +396,5 @@ uint32_t GetWasmCalleeTag(RelocInfo* rinfo);
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif
+
+#endif // V8_WASM_WASM_CODE_MANAGER_H_
diff --git a/deps/v8/src/wasm/wasm-code-specialization.cc b/deps/v8/src/wasm/wasm-code-specialization.cc
index 416d1d600a..f261f44991 100644
--- a/deps/v8/src/wasm/wasm-code-specialization.cc
+++ b/deps/v8/src/wasm/wasm-code-specialization.cc
@@ -83,32 +83,33 @@ bool IsAtWasmDirectCallTarget(RelocIterator& it) {
} // namespace
-CodeSpecialization::CodeSpecialization(Isolate* isolate, Zone* zone)
- : isolate_(isolate) {}
+CodeSpecialization::CodeSpecialization(Isolate* isolate, Zone* zone) {}
CodeSpecialization::~CodeSpecialization() {}
void CodeSpecialization::RelocateWasmContextReferences(Address new_context) {
DCHECK_NOT_NULL(new_context);
- DCHECK_NULL(new_wasm_context_address);
- new_wasm_context_address = new_context;
+ DCHECK_NULL(new_wasm_context_address_);
+ new_wasm_context_address_ = new_context;
}
void CodeSpecialization::PatchTableSize(uint32_t old_size, uint32_t new_size) {
- DCHECK(old_function_table_size == 0 && new_function_table_size == 0);
- old_function_table_size = old_size;
- new_function_table_size = new_size;
+ DCHECK(old_function_table_size_ == 0 && new_function_table_size_ == 0);
+ old_function_table_size_ = old_size;
+ new_function_table_size_ = new_size;
}
void CodeSpecialization::RelocateDirectCalls(
Handle<WasmInstanceObject> instance) {
- DCHECK(relocate_direct_calls_instance.is_null());
+ DCHECK(relocate_direct_calls_instance_.is_null());
DCHECK(!instance.is_null());
- relocate_direct_calls_instance = instance;
+ relocate_direct_calls_instance_ = instance;
}
void CodeSpecialization::RelocatePointer(Address old_ptr, Address new_ptr) {
- pointers_to_relocate.insert(std::make_pair(old_ptr, new_ptr));
+ DCHECK_EQ(0, pointers_to_relocate_.count(old_ptr));
+ DCHECK_EQ(0, pointers_to_relocate_.count(new_ptr));
+ pointers_to_relocate_.insert(std::make_pair(old_ptr, new_ptr));
}
bool CodeSpecialization::ApplyToWholeInstance(
@@ -147,14 +148,14 @@ bool CodeSpecialization::ApplyToWholeInstance(
// Patch all exported functions (JS_TO_WASM_FUNCTION).
int reloc_mode = 0;
// We need to patch WASM_CONTEXT_REFERENCE to put the correct address.
- if (new_wasm_context_address) {
+ if (new_wasm_context_address_) {
reloc_mode |= RelocInfo::ModeMask(RelocInfo::WASM_CONTEXT_REFERENCE);
}
// Patch CODE_TARGET if we shall relocate direct calls. If we patch direct
- // calls, the instance registered for that (relocate_direct_calls_instance)
+ // calls, the instance registered for that (relocate_direct_calls_instance_)
// should match the instance we currently patch (instance).
- if (!relocate_direct_calls_instance.is_null()) {
- DCHECK_EQ(instance, *relocate_direct_calls_instance);
+ if (!relocate_direct_calls_instance_.is_null()) {
+ DCHECK_EQ(instance, *relocate_direct_calls_instance_);
reloc_mode |=
RelocInfo::ModeMask(FLAG_wasm_jit_to_native ? RelocInfo::JS_TO_WASM_CALL
: RelocInfo::CODE_TARGET);
@@ -170,24 +171,23 @@ bool CodeSpecialization::ApplyToWholeInstance(
RelocInfo::Mode mode = it.rinfo()->rmode();
switch (mode) {
case RelocInfo::WASM_CONTEXT_REFERENCE:
- it.rinfo()->set_wasm_context_reference(export_wrapper->GetIsolate(),
- new_wasm_context_address,
+ it.rinfo()->set_wasm_context_reference(new_wasm_context_address_,
icache_flush_mode);
break;
case RelocInfo::JS_TO_WASM_CALL: {
DCHECK(FLAG_wasm_jit_to_native);
const WasmCode* new_code = native_module->GetCode(exp.index);
- it.rinfo()->set_js_to_wasm_address(
- nullptr, new_code->instructions().start(), SKIP_ICACHE_FLUSH);
+ it.rinfo()->set_js_to_wasm_address(new_code->instructions().start(),
+ SKIP_ICACHE_FLUSH);
} break;
case RelocInfo::CODE_TARGET: {
DCHECK(!FLAG_wasm_jit_to_native);
// Ignore calls to other builtins like ToNumber.
if (!IsAtWasmDirectCallTarget(it)) continue;
Code* new_code = Code::cast(code_table->get(exp.index));
- it.rinfo()->set_target_address(
- new_code->GetIsolate(), new_code->instruction_start(),
- UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ it.rinfo()->set_target_address(new_code->instruction_start(),
+ UPDATE_WRITE_BARRIER,
+ SKIP_ICACHE_FLUSH);
} break;
default:
UNREACHABLE();
@@ -210,9 +210,9 @@ bool CodeSpecialization::ApplyToWasmCode(WasmCodeWrapper code,
DCHECK_EQ(wasm::WasmCode::kFunction, code.GetWasmCode()->kind());
}
- bool patch_table_size = old_function_table_size || new_function_table_size;
- bool reloc_direct_calls = !relocate_direct_calls_instance.is_null();
- bool reloc_pointers = pointers_to_relocate.size() > 0;
+ bool patch_table_size = old_function_table_size_ || new_function_table_size_;
+ bool reloc_direct_calls = !relocate_direct_calls_instance_.is_null();
+ bool reloc_pointers = pointers_to_relocate_.size() > 0;
int reloc_mode = 0;
auto add_mode = [&reloc_mode](bool cond, RelocInfo::Mode mode) {
@@ -253,7 +253,7 @@ bool CodeSpecialization::ApplyToWasmCode(WasmCodeWrapper code,
// bytes to find the new compiled function.
size_t offset = it.rinfo()->pc() - code.GetCode()->instruction_start();
if (!patch_direct_calls_helper) {
- patch_direct_calls_helper.emplace(*relocate_direct_calls_instance,
+ patch_direct_calls_helper.emplace(*relocate_direct_calls_instance_,
*code.GetCode());
}
int byte_pos = AdvanceSourcePositionTableIterator(
@@ -262,10 +262,9 @@ bool CodeSpecialization::ApplyToWasmCode(WasmCodeWrapper code,
patch_direct_calls_helper->decoder,
patch_direct_calls_helper->func_bytes + byte_pos);
FixedArray* code_table =
- relocate_direct_calls_instance->compiled_module()->code_table();
+ relocate_direct_calls_instance_->compiled_module()->code_table();
Code* new_code = Code::cast(code_table->get(called_func_index));
- it.rinfo()->set_target_address(new_code->GetIsolate(),
- new_code->instruction_start(),
+ it.rinfo()->set_target_address(new_code->instruction_start(),
UPDATE_WRITE_BARRIER, icache_flush_mode);
changed = true;
} break;
@@ -280,7 +279,7 @@ bool CodeSpecialization::ApplyToWasmCode(WasmCodeWrapper code,
size_t offset =
it.rinfo()->pc() - code.GetWasmCode()->instructions().start();
if (!patch_direct_calls_helper) {
- patch_direct_calls_helper.emplace(*relocate_direct_calls_instance,
+ patch_direct_calls_helper.emplace(*relocate_direct_calls_instance_,
code.GetWasmCode());
}
int byte_pos = AdvanceSourcePositionTableIterator(
@@ -289,23 +288,24 @@ bool CodeSpecialization::ApplyToWasmCode(WasmCodeWrapper code,
patch_direct_calls_helper->decoder,
patch_direct_calls_helper->func_bytes + byte_pos);
const WasmCode* new_code = native_module->GetCode(called_func_index);
- it.rinfo()->set_wasm_call_address(
- isolate_, new_code->instructions().start(), icache_flush_mode);
+ it.rinfo()->set_wasm_call_address(new_code->instructions().start(),
+ icache_flush_mode);
changed = true;
} break;
case RelocInfo::WASM_GLOBAL_HANDLE: {
DCHECK(reloc_pointers);
Address old_ptr = it.rinfo()->global_handle();
- if (pointers_to_relocate.count(old_ptr) == 1) {
- Address new_ptr = pointers_to_relocate[old_ptr];
- it.rinfo()->set_global_handle(isolate_, new_ptr, icache_flush_mode);
+ auto entry = pointers_to_relocate_.find(old_ptr);
+ if (entry != pointers_to_relocate_.end()) {
+ Address new_ptr = entry->second;
+ it.rinfo()->set_global_handle(new_ptr, icache_flush_mode);
changed = true;
}
} break;
case RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE:
DCHECK(patch_table_size);
it.rinfo()->update_wasm_function_table_size_reference(
- isolate_, old_function_table_size, new_function_table_size,
+ old_function_table_size_, new_function_table_size_,
icache_flush_mode);
changed = true;
break;
diff --git a/deps/v8/src/wasm/wasm-code-specialization.h b/deps/v8/src/wasm/wasm-code-specialization.h
index 8f68677fbf..bed565cf05 100644
--- a/deps/v8/src/wasm/wasm-code-specialization.h
+++ b/deps/v8/src/wasm/wasm-code-specialization.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_CODE_SPECIALIZATION_H_
-#define V8_WASM_CODE_SPECIALIZATION_H_
+#ifndef V8_WASM_WASM_CODE_SPECIALIZATION_H_
+#define V8_WASM_WASM_CODE_SPECIALIZATION_H_
#include "src/assembler.h"
#include "src/identity-map.h"
@@ -47,19 +47,18 @@ class CodeSpecialization {
ICacheFlushMode = FLUSH_ICACHE_IF_NEEDED);
private:
- Isolate* isolate_;
- Address new_wasm_context_address = 0;
+ Address new_wasm_context_address_ = 0;
- uint32_t old_function_table_size = 0;
- uint32_t new_function_table_size = 0;
+ uint32_t old_function_table_size_ = 0;
+ uint32_t new_function_table_size_ = 0;
- Handle<WasmInstanceObject> relocate_direct_calls_instance;
+ Handle<WasmInstanceObject> relocate_direct_calls_instance_;
- std::map<Address, Address> pointers_to_relocate;
+ std::unordered_map<Address, Address> pointers_to_relocate_;
};
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif // V8_WASM_CODE_SPECIALIZATION_H_
+#endif // V8_WASM_WASM_CODE_SPECIALIZATION_H_
diff --git a/deps/v8/src/wasm/wasm-code-wrapper.cc b/deps/v8/src/wasm/wasm-code-wrapper.cc
index 9256391543..c9eee24f3d 100644
--- a/deps/v8/src/wasm/wasm-code-wrapper.cc
+++ b/deps/v8/src/wasm/wasm-code-wrapper.cc
@@ -7,7 +7,7 @@
#include "src/objects-inl.h"
#include "src/objects/code.h"
#include "src/wasm/wasm-code-manager.h"
-#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-objects-inl.h"
namespace v8 {
namespace internal {
@@ -59,10 +59,17 @@ Vector<uint8_t> WasmCodeWrapper::instructions() const {
static_cast<size_t>(code->instruction_size())};
}
-Handle<WasmInstanceObject> WasmCodeWrapper::wasm_instance() const {
- return IsCodeObject()
- ? handle(WasmInstanceObject::GetOwningInstanceGC(*GetCode()))
- : handle(WasmInstanceObject::GetOwningInstance(GetWasmCode()));
+WasmInstanceObject* WasmCodeWrapper::wasm_instance() const {
+ if (IsCodeObject()) {
+ WeakCell* weak_instance =
+ WeakCell::cast(GetCode()->deoptimization_data()->get(0));
+ return WasmInstanceObject::cast(weak_instance->value());
+ }
+ return GetWasmCode()->owner()->compiled_module()->owning_instance();
+}
+
+WasmContext* WasmCodeWrapper::wasm_context() const {
+ return wasm_instance()->wasm_context()->get();
}
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-code-wrapper.h b/deps/v8/src/wasm/wasm-code-wrapper.h
index 7d978152f1..d51bc085aa 100644
--- a/deps/v8/src/wasm/wasm-code-wrapper.h
+++ b/deps/v8/src/wasm/wasm-code-wrapper.h
@@ -1,8 +1,8 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_CODE_WRAPPER_H_
-#define V8_WASM_CODE_WRAPPER_H_
+#ifndef V8_WASM_WASM_CODE_WRAPPER_H_
+#define V8_WASM_WASM_CODE_WRAPPER_H_
#include "src/handles.h"
@@ -13,6 +13,7 @@ class WasmCode;
} // namespace wasm
class Code;
+struct WasmContext;
class WasmInstanceObject;
// TODO(mtrofin): remove once we remove FLAG_wasm_jit_to_native
@@ -30,7 +31,8 @@ class WasmCodeWrapper {
Vector<uint8_t> instructions() const;
- Handle<WasmInstanceObject> wasm_instance() const;
+ WasmInstanceObject* wasm_instance() const;
+ WasmContext* wasm_context() const;
#ifdef ENABLE_DISASSEMBLER
void Disassemble(const char* name, Isolate* isolate, std::ostream& os) const;
@@ -45,4 +47,4 @@ class WasmCodeWrapper {
} // namespace internal
} // namespace v8
-#endif // V8_WASM_CODE_WRAPPER_H_
+#endif // V8_WASM_WASM_CODE_WRAPPER_H_
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
index 5e7ce1e4f5..932501d776 100644
--- a/deps/v8/src/wasm/wasm-constants.h
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_CONSTANTS_H_
-#define V8_WASM_CONSTANTS_H_
+#ifndef V8_WASM_WASM_CONSTANTS_H_
+#define V8_WASM_WASM_CONSTANTS_H_
namespace v8 {
namespace internal {
@@ -80,4 +80,4 @@ constexpr WasmCodePosition kNoCodePosition = -1;
} // namespace internal
} // namespace v8
-#endif // V8_WASM_CONSTANTS_H_
+#endif // V8_WASM_WASM_CONSTANTS_H_
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 87995df4e6..08d436ffa4 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -438,7 +438,6 @@ class InterpreterHandle {
Handle<JSObject> GetLocalScopeObject(wasm::InterpretedFrame* frame,
Handle<WasmDebugInfo> debug_info) {
Isolate* isolate = debug_info->GetIsolate();
- Handle<WasmInstanceObject> instance(debug_info->wasm_instance(), isolate);
Handle<JSObject> local_scope_object =
isolate_->factory()->NewJSObjectWithNullProto();
@@ -497,8 +496,6 @@ class InterpreterHandle {
Handle<JSArray> GetScopeDetails(Address frame_pointer, int frame_index,
Handle<WasmDebugInfo> debug_info) {
auto frame = GetInterpretedFrame(frame_pointer, frame_index);
- Isolate* isolate = debug_info->GetIsolate();
- Handle<WasmInstanceObject> instance(debug_info->wasm_instance(), isolate);
Handle<FixedArray> global_scope =
isolate_->factory()->NewFixedArray(ScopeIterator::kScopeDetailsSize);
@@ -591,8 +588,7 @@ void RedirectCallsitesInCodeGC(Code* code, CodeRelocationMapGC& map) {
Code* target = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
Handle<Code>* new_target = map.Find(target);
if (!new_target) continue;
- it.rinfo()->set_target_address(code->GetIsolate(),
- (*new_target)->instruction_start());
+ it.rinfo()->set_target_address((*new_target)->instruction_start());
}
}
@@ -606,7 +602,7 @@ void RedirectCallsitesInCode(Isolate* isolate, const wasm::WasmCode* code,
Address target = it.rinfo()->target_address();
auto new_target = map->find(target);
if (new_target == map->end()) continue;
- it.rinfo()->set_wasm_call_address(isolate, new_target->second);
+ it.rinfo()->set_wasm_call_address(new_target->second);
}
}
@@ -618,7 +614,7 @@ void RedirectCallsitesInJSWrapperCode(Isolate* isolate, Code* code,
Address target = it.rinfo()->js_to_wasm_address();
auto new_target = map->find(target);
if (new_target == map->end()) continue;
- it.rinfo()->set_js_to_wasm_address(isolate, new_target->second);
+ it.rinfo()->set_js_to_wasm_address(new_target->second);
}
}
@@ -685,7 +681,9 @@ wasm::WasmInterpreter* WasmDebugInfo::SetupForTesting(
auto interp_handle =
Managed<wasm::InterpreterHandle>::Allocate(isolate, isolate, *debug_info);
debug_info->set(kInterpreterHandleIndex, *interp_handle);
- return interp_handle->get()->interpreter();
+ auto ret = interp_handle->get()->interpreter();
+ ret->SetCallIndirectTestMode();
+ return ret;
}
bool WasmDebugInfo::IsWasmDebugInfo(Object* object) {
@@ -850,12 +848,7 @@ Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
debug_info->set_c_wasm_entries(*entries);
}
DCHECK(entries->get(index)->IsUndefined(isolate));
- Address context_address = reinterpret_cast<Address>(
- debug_info->wasm_instance()->has_memory_object()
- ? debug_info->wasm_instance()->wasm_context()
- : nullptr);
- Handle<Code> new_entry_code =
- compiler::CompileCWasmEntry(isolate, sig, context_address);
+ Handle<Code> new_entry_code = compiler::CompileCWasmEntry(isolate, sig);
Handle<String> name = isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("c-wasm-entry"));
Handle<SharedFunctionInfo> shared =
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index 4c84b70dbd..460742d15a 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/wasm/wasm-engine.h"
+
#include "src/objects-inl.h"
#include "src/wasm/module-compiler.h"
@@ -18,6 +19,106 @@ bool WasmEngine::SyncValidate(Isolate* isolate, const ModuleWireBytes& bytes) {
return result.ok();
}
+MaybeHandle<WasmModuleObject> WasmEngine::SyncCompileTranslatedAsmJs(
+ Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
+ Handle<Script> asm_js_script,
+ Vector<const byte> asm_js_offset_table_bytes) {
+ ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
+ bytes.end(), false, kAsmJsOrigin);
+ CHECK(!result.failed());
+
+ // Transfer ownership of the WasmModule to the {WasmModuleWrapper} generated
+ // in {CompileToModuleObject}.
+ return CompileToModuleObject(isolate, thrower, std::move(result.val), bytes,
+ asm_js_script, asm_js_offset_table_bytes);
+}
+
+MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
+ Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes) {
+ ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
+ bytes.end(), false, kWasmOrigin);
+ if (result.failed()) {
+ thrower->CompileFailed("Wasm decoding failed", result);
+ return {};
+ }
+
+ // Transfer ownership of the WasmModule to the {WasmModuleWrapper} generated
+ // in {CompileToModuleObject}.
+ return CompileToModuleObject(isolate, thrower, std::move(result.val), bytes,
+ Handle<Script>(), Vector<const byte>());
+}
+
+MaybeHandle<WasmInstanceObject> WasmEngine::SyncInstantiate(
+ Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
+ MaybeHandle<JSArrayBuffer> memory) {
+ return InstantiateToInstanceObject(isolate, thrower, module_object, imports,
+ memory);
+}
+
+void WasmEngine::AsyncInstantiate(Isolate* isolate, Handle<JSPromise> promise,
+ Handle<WasmModuleObject> module_object,
+ MaybeHandle<JSReceiver> imports) {
+ ErrorThrower thrower(isolate, nullptr);
+ MaybeHandle<WasmInstanceObject> instance_object = SyncInstantiate(
+ isolate, &thrower, module_object, imports, Handle<JSArrayBuffer>::null());
+ if (thrower.error()) {
+ MaybeHandle<Object> result = JSPromise::Reject(promise, thrower.Reify());
+ CHECK_EQ(result.is_null(), isolate->has_pending_exception());
+ return;
+ }
+ Handle<WasmInstanceObject> instance = instance_object.ToHandleChecked();
+ MaybeHandle<Object> result = JSPromise::Resolve(promise, instance);
+ CHECK_EQ(result.is_null(), isolate->has_pending_exception());
+}
+
+void WasmEngine::AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
+ const ModuleWireBytes& bytes, bool is_shared) {
+ if (!FLAG_wasm_async_compilation) {
+ // Asynchronous compilation disabled; fall back on synchronous compilation.
+ ErrorThrower thrower(isolate, "WasmCompile");
+ MaybeHandle<WasmModuleObject> module_object;
+ if (is_shared) {
+ // Make a copy of the wire bytes to avoid concurrent modification.
+ std::unique_ptr<uint8_t[]> copy(new uint8_t[bytes.length()]);
+ memcpy(copy.get(), bytes.start(), bytes.length());
+ i::wasm::ModuleWireBytes bytes_copy(copy.get(),
+ copy.get() + bytes.length());
+ module_object = SyncCompile(isolate, &thrower, bytes_copy);
+ } else {
+ // The wire bytes are not shared, OK to use them directly.
+ module_object = SyncCompile(isolate, &thrower, bytes);
+ }
+ if (thrower.error()) {
+ MaybeHandle<Object> result = JSPromise::Reject(promise, thrower.Reify());
+ CHECK_EQ(result.is_null(), isolate->has_pending_exception());
+ return;
+ }
+ Handle<WasmModuleObject> module = module_object.ToHandleChecked();
+ MaybeHandle<Object> result = JSPromise::Resolve(promise, module);
+ CHECK_EQ(result.is_null(), isolate->has_pending_exception());
+ return;
+ }
+
+ if (FLAG_wasm_test_streaming) {
+ std::shared_ptr<StreamingDecoder> streaming_decoder =
+ isolate->wasm_engine()
+ ->compilation_manager()
+ ->StartStreamingCompilation(isolate, handle(isolate->context()),
+ promise);
+ streaming_decoder->OnBytesReceived(bytes.module_bytes());
+ streaming_decoder->Finish();
+ return;
+ }
+ // Make a copy of the wire bytes in case the user program changes them
+ // during asynchronous compilation.
+ std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
+ memcpy(copy.get(), bytes.start(), bytes.length());
+ isolate->wasm_engine()->compilation_manager()->StartAsyncCompileJob(
+ isolate, std::move(copy), bytes.length(), handle(isolate->context()),
+ promise);
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index bf06b47ed7..8a698c83b9 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef WASM_ENGINE_H_
-#define WASM_ENGINE_H_
+#ifndef V8_WASM_WASM_ENGINE_H_
+#define V8_WASM_WASM_ENGINE_H_
#include <memory>
@@ -14,8 +14,14 @@
namespace v8 {
namespace internal {
+class WasmModuleObject;
+class WasmInstanceObject;
+
namespace wasm {
+class ErrorThrower;
+struct ModuleWireBytes;
+
// The central data structure that represents an engine instance capable of
// loading, instantiating, and executing WASM code.
class V8_EXPORT_PRIVATE WasmEngine {
@@ -23,8 +29,44 @@ class V8_EXPORT_PRIVATE WasmEngine {
explicit WasmEngine(std::unique_ptr<WasmCodeManager> code_manager)
: code_manager_(std::move(code_manager)) {}
+ // Synchronously validates the given bytes that represent an encoded WASM
+ // module.
bool SyncValidate(Isolate* isolate, const ModuleWireBytes& bytes);
+ // Synchronously compiles the given bytes that represent a translated
+ // asm.js module.
+ MaybeHandle<WasmModuleObject> SyncCompileTranslatedAsmJs(
+ Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
+ Handle<Script> asm_js_script,
+ Vector<const byte> asm_js_offset_table_bytes);
+
+ // Synchronously compiles the given bytes that represent an encoded WASM
+ // module.
+ MaybeHandle<WasmModuleObject> SyncCompile(Isolate* isolate,
+ ErrorThrower* thrower,
+ const ModuleWireBytes& bytes);
+
+ // Synchronously instantiate the given WASM module with the given imports.
+ // If the module represents an asm.js module, then the supplied {memory}
+ // should be used as the memory of the instance.
+ MaybeHandle<WasmInstanceObject> SyncInstantiate(
+ Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
+ MaybeHandle<JSArrayBuffer> memory);
+
+ // Begin an asynchronous compilation of the given bytes that represent an
+ // encoded WASM module, placing the result in the supplied {promise}.
+ // The {is_shared} flag indicates if the bytes backing the module could
+ // be shared across threads, i.e. could be concurrently modified.
+ void AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
+ const ModuleWireBytes& bytes, bool is_shared);
+
+ // Begin an asynchronous instantiation of the given WASM module, placing the
+ // result in the supplied {promise}.
+ void AsyncInstantiate(Isolate* isolate, Handle<JSPromise> promise,
+ Handle<WasmModuleObject> module_object,
+ MaybeHandle<JSReceiver> imports);
+
CompilationManager* compilation_manager() { return &compilation_manager_; }
WasmCodeManager* code_manager() const { return code_manager_.get(); }
@@ -43,4 +85,4 @@ class V8_EXPORT_PRIVATE WasmEngine {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_WASM_WASM_ENGINE_H_
diff --git a/deps/v8/src/wasm/wasm-external-refs.h b/deps/v8/src/wasm/wasm-external-refs.h
index dea620338a..d44f5b242f 100644
--- a/deps/v8/src/wasm/wasm-external-refs.h
+++ b/deps/v8/src/wasm/wasm-external-refs.h
@@ -4,8 +4,8 @@
#include <stdint.h>
-#ifndef WASM_EXTERNAL_REFS_H
-#define WASM_EXTERNAL_REFS_H
+#ifndef V8_WASM_WASM_EXTERNAL_REFS_H_
+#define V8_WASM_WASM_EXTERNAL_REFS_H_
namespace v8 {
namespace internal {
@@ -77,4 +77,5 @@ void call_trap_callback_for_testing();
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif
+
+#endif // V8_WASM_WASM_EXTERNAL_REFS_H_
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index 2f8fb0bf4a..3bcb1b5ef6 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -477,6 +477,17 @@ int64_t ExecuteI64SConvertF32(float a, TrapReason* trap) {
return output;
}
+int64_t ExecuteI64SConvertSatF32(float a) {
+ TrapReason base_trap = kTrapCount;
+ int64_t val = ExecuteI64SConvertF32(a, &base_trap);
+ if (base_trap == kTrapCount) {
+ return val;
+ }
+ return std::isnan(a) ? 0
+ : (a < 0.0 ? std::numeric_limits<int64_t>::min()
+ : std::numeric_limits<int64_t>::max());
+}
+
int64_t ExecuteI64SConvertF64(double a, TrapReason* trap) {
int64_t output;
if (!float64_to_int64_wrapper(&a, &output)) {
@@ -485,6 +496,17 @@ int64_t ExecuteI64SConvertF64(double a, TrapReason* trap) {
return output;
}
+int64_t ExecuteI64SConvertSatF64(double a) {
+ TrapReason base_trap = kTrapCount;
+ int64_t val = ExecuteI64SConvertF64(a, &base_trap);
+ if (base_trap == kTrapCount) {
+ return val;
+ }
+ return std::isnan(a) ? 0
+ : (a < 0.0 ? std::numeric_limits<int64_t>::min()
+ : std::numeric_limits<int64_t>::max());
+}
+
uint64_t ExecuteI64UConvertF32(float a, TrapReason* trap) {
uint64_t output;
if (!float32_to_uint64_wrapper(&a, &output)) {
@@ -493,6 +515,17 @@ uint64_t ExecuteI64UConvertF32(float a, TrapReason* trap) {
return output;
}
+uint64_t ExecuteI64UConvertSatF32(float a) {
+ TrapReason base_trap = kTrapCount;
+ uint64_t val = ExecuteI64UConvertF32(a, &base_trap);
+ if (base_trap == kTrapCount) {
+ return val;
+ }
+ return std::isnan(a) ? 0
+ : (a < 0.0 ? std::numeric_limits<uint64_t>::min()
+ : std::numeric_limits<uint64_t>::max());
+}
+
uint64_t ExecuteI64UConvertF64(double a, TrapReason* trap) {
uint64_t output;
if (!float64_to_uint64_wrapper(&a, &output)) {
@@ -501,6 +534,17 @@ uint64_t ExecuteI64UConvertF64(double a, TrapReason* trap) {
return output;
}
+uint64_t ExecuteI64UConvertSatF64(double a) {
+ TrapReason base_trap = kTrapCount;
+ int64_t val = ExecuteI64UConvertF64(a, &base_trap);
+ if (base_trap == kTrapCount) {
+ return val;
+ }
+ return std::isnan(a) ? 0
+ : (a < 0.0 ? std::numeric_limits<uint64_t>::min()
+ : std::numeric_limits<uint64_t>::max());
+}
+
inline int64_t ExecuteI64SConvertI32(int32_t a, TrapReason* trap) {
return static_cast<int64_t>(a);
}
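
The ExecuteI64*ConvertSat* helpers added above implement saturating float-to-integer conversion: NaN maps to 0 and out-of-range values clamp to the integer type's minimum or maximum instead of trapping. The following self-contained sketch shows those semantics directly; it is an illustrative reimplementation, not the V8 helpers themselves.

#include <cassert>
#include <cmath>
#include <cstdint>
#include <limits>

int64_t I64SConvertSatF64(double a) {
  if (std::isnan(a)) return 0;  // NaN saturates to zero
  if (a < static_cast<double>(std::numeric_limits<int64_t>::min()))
    return std::numeric_limits<int64_t>::min();
  // 2^63 is not representable in int64_t, so clamp anything >= 2^63.
  if (a >= static_cast<double>(std::numeric_limits<int64_t>::max()))
    return std::numeric_limits<int64_t>::max();
  return static_cast<int64_t>(a);
}

int main() {
  assert(I64SConvertSatF64(std::nan("")) == 0);
  assert(I64SConvertSatF64(-1e30) == std::numeric_limits<int64_t>::min());
  assert(I64SConvertSatF64(1e30) == std::numeric_limits<int64_t>::max());
  assert(I64SConvertSatF64(42.5) == 42);
  return 0;
}
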
@@ -924,6 +968,9 @@ class CodeMap {
// This handle is set and reset by the SetInstanceObject() /
// ClearInstanceObject() method, which is used by the HeapObjectsScope.
Handle<WasmInstanceObject> instance_;
+  // TODO(wasm): Remove this testing wart. It is needed because interpreter
+  // entry stubs are not generated when testing the interpreter in cctests.
+ bool call_indirect_through_module_ = false;
public:
CodeMap(Isolate* isolate, const WasmModule* module,
@@ -942,6 +989,12 @@ class CodeMap {
}
}
+ bool call_indirect_through_module() { return call_indirect_through_module_; }
+
+ void set_call_indirect_through_module(bool val) {
+ call_indirect_through_module_ = val;
+ }
+
void SetInstanceObject(Handle<WasmInstanceObject> instance) {
DCHECK(instance_.is_null());
instance_ = instance;
@@ -987,12 +1040,34 @@ class CodeMap {
}
InterpreterCode* GetIndirectCode(uint32_t table_index, uint32_t entry_index) {
+ uint32_t saved_index;
+ USE(saved_index);
if (table_index >= module_->function_tables.size()) return nullptr;
+ // Mask table index for SSCA mitigation.
+ saved_index = table_index;
+ table_index &=
+ static_cast<int32_t>((table_index - module_->function_tables.size()) &
+ ~static_cast<int32_t>(table_index)) >>
+ 31;
+ DCHECK_EQ(table_index, saved_index);
const WasmIndirectFunctionTable* table =
&module_->function_tables[table_index];
if (entry_index >= table->values.size()) return nullptr;
+ // Mask entry_index for SSCA mitigation.
+ saved_index = entry_index;
+ entry_index &= static_cast<int32_t>((entry_index - table->values.size()) &
+ ~static_cast<int32_t>(entry_index)) >>
+ 31;
+ DCHECK_EQ(entry_index, saved_index);
uint32_t index = table->values[entry_index];
if (index >= interpreter_code_.size()) return nullptr;
+ // Mask index for SSCA mitigation.
+ saved_index = index;
+ index &= static_cast<int32_t>((index - interpreter_code_.size()) &
+ ~static_cast<int32_t>(index)) >>
+ 31;
+ DCHECK_EQ(index, saved_index);
+
return GetCode(index);
}
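
The masking added to GetIndirectCode above is a branchless guard against speculative out-of-bounds reads (SSCA mitigation): after the regular bounds check, the index is ANDed with a mask that is all-ones for in-bounds values and zero otherwise, so a speculated out-of-range access is forced to slot 0. A standalone sketch of that mask computation follows, assuming the usual arithmetic right shift on negative values and table sizes well below 2^31 (both hold on V8's supported targets).

#include <cassert>
#include <cstdint>

// Returns `index` unchanged if index < size, and 0 otherwise, without a branch.
uint32_t MaskIndex(uint32_t index, size_t size) {
  uint32_t mask = static_cast<int32_t>((index - size) &
                                       ~static_cast<int32_t>(index)) >>
                  31;  // all-ones when index < size, zero otherwise
  return index & mask;
}

int main() {
  assert(MaskIndex(2, 8) == 2);    // in-bounds index passes through
  assert(MaskIndex(8, 8) == 0);    // out-of-bounds index is clamped to 0
  assert(MaskIndex(100, 8) == 0);
  return 0;
}
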
@@ -1543,9 +1618,21 @@ class ThreadImpl {
case kExprI32UConvertSatF64:
Push(WasmValue(ExecuteConvertSaturate<uint32_t>(Pop().to<double>())));
return true;
+ case kExprI64SConvertSatF32:
+ Push(WasmValue(ExecuteI64SConvertSatF32(Pop().to<float>())));
+ return true;
+ case kExprI64UConvertSatF32:
+ Push(WasmValue(ExecuteI64UConvertSatF32(Pop().to<float>())));
+ return true;
+ case kExprI64SConvertSatF64:
+ Push(WasmValue(ExecuteI64SConvertSatF64(Pop().to<double>())));
+ return true;
+ case kExprI64UConvertSatF64:
+ Push(WasmValue(ExecuteI64UConvertSatF64(Pop().to<double>())));
+ return true;
default:
- V8_Fatal(__FILE__, __LINE__, "Unknown or unimplemented opcode #%d:%s",
- code->start[pc], OpcodeName(code->start[pc]));
+ FATAL("Unknown or unimplemented opcode #%d:%s", code->start[pc],
+ OpcodeName(code->start[pc]));
UNREACHABLE();
}
return false;
@@ -1912,7 +1999,7 @@ class ThreadImpl {
// Assume only one table for now.
DCHECK_LE(module()->function_tables.size(), 1u);
ExternalCallResult result =
- CallIndirectFunction(0, entry_index, operand.index);
+ CallIndirectFunction(0, entry_index, operand.sig_index);
switch (result.type) {
case ExternalCallResult::INTERNAL:
// The import is a function of this instance. Call it directly.
@@ -2071,6 +2158,9 @@ class ThreadImpl {
WasmInstanceObject::GrowMemory(isolate, instance, delta_pages);
Push(WasmValue(result));
len = 1 + operand.length;
+ // Treat one grow_memory instruction like 1000 other instructions,
+ // because it is a really expensive operation.
+ if (max > 0) max = std::max(0, max - 1000);
break;
}
case kExprMemorySize: {
@@ -2152,8 +2242,8 @@ class ThreadImpl {
#undef EXECUTE_UNOP
default:
- V8_Fatal(__FILE__, __LINE__, "Unknown or unimplemented opcode #%d:%s",
- code->start[pc], OpcodeName(code->start[pc]));
+ FATAL("Unknown or unimplemented opcode #%d:%s", code->start[pc],
+ OpcodeName(code->start[pc]));
UNREACHABLE();
}
@@ -2386,18 +2476,24 @@ class ThreadImpl {
arg_buffer.resize(return_size);
}
- // Wrap the arg_buffer data pointer in a handle. As this is an aligned
- // pointer, to the GC it will look like a Smi.
+ // Wrap the arg_buffer data pointer and the WasmContext* in a handle. As
+ // this is an aligned pointer, to the GC it will look like a Smi.
Handle<Object> arg_buffer_obj(reinterpret_cast<Object*>(arg_buffer.data()),
isolate);
DCHECK(!arg_buffer_obj->IsHeapObject());
+ static_assert(compiler::CWasmEntryParameters::kNumParameters == 3,
+ "code below needs adaption");
Handle<Object> args[compiler::CWasmEntryParameters::kNumParameters];
+ WasmContext* context = code.wasm_context();
+ Handle<Object> context_obj(reinterpret_cast<Object*>(context), isolate);
+ DCHECK(!context_obj->IsHeapObject());
args[compiler::CWasmEntryParameters::kCodeObject] =
code.IsCodeObject()
? Handle<Object>::cast(code.GetCode())
: Handle<Object>::cast(isolate->factory()->NewForeign(
code.GetWasmCode()->instructions().start(), TENURED));
+ args[compiler::CWasmEntryParameters::kWasmContext] = context_obj;
args[compiler::CWasmEntryParameters::kArgumentsBuffer] = arg_buffer_obj;
Handle<Object> receiver = isolate->factory()->undefined_value();
@@ -2466,13 +2562,19 @@ class ThreadImpl {
DCHECK(AllowHeapAllocation::IsAllowed());
if (code->kind() == wasm::WasmCode::kFunction) {
- DCHECK_EQ(code->owner()->compiled_module()->owning_instance(),
- codemap()->instance());
+ if (code->owner()->compiled_module()->owning_instance() !=
+ codemap()->instance()) {
+ return CallExternalWasmFunction(isolate, WasmCodeWrapper(code),
+ signature);
+ }
return {ExternalCallResult::INTERNAL, codemap()->GetCode(code->index())};
}
+
if (code->kind() == wasm::WasmCode::kWasmToJsWrapper) {
return CallExternalJSFunction(isolate, WasmCodeWrapper(code), signature);
- } else if (code->kind() == wasm::WasmCode::kWasmToWasmWrapper) {
+ }
+ if (code->kind() == wasm::WasmCode::kWasmToWasmWrapper ||
+ code->kind() == wasm::WasmCode::kInterpreterStub) {
return CallExternalWasmFunction(isolate, WasmCodeWrapper(code),
signature);
}
@@ -2501,23 +2603,8 @@ class ThreadImpl {
ExternalCallResult CallIndirectFunction(uint32_t table_index,
uint32_t entry_index,
uint32_t sig_index) {
- bool no_func_tables = !codemap()->has_instance();
- if (FLAG_wasm_jit_to_native) {
- no_func_tables = no_func_tables || codemap()
- ->instance()
- ->compiled_module()
- ->GetNativeModule()
- ->function_tables()
- .empty();
- } else {
- no_func_tables =
- no_func_tables ||
- !codemap()->instance()->compiled_module()->has_function_tables();
- }
- if (no_func_tables) {
- // No instance. Rely on the information stored in the WasmModule.
- // TODO(wasm): This is only needed for testing. Refactor testing to use
- // the same paths as production.
+ if (codemap()->call_indirect_through_module()) {
+ // Rely on the information stored in the WasmModule.
InterpreterCode* code =
codemap()->GetIndirectCode(table_index, entry_index);
if (!code) return {ExternalCallResult::INVALID_FUNC};
@@ -2551,7 +2638,7 @@ class ThreadImpl {
DCHECK_EQ(canonical_sig_index,
module()->signature_map.Find(module()->signatures[sig_index]));
- if (!FLAG_wasm_jit_to_native) {
+ if (!WASM_CONTEXT_TABLES) {
// Check signature.
FixedArray* fun_tables = compiled_module->function_tables();
if (table_index >= static_cast<uint32_t>(fun_tables->length())) {
@@ -2578,33 +2665,23 @@ class ThreadImpl {
target_gc = Code::cast(fun_table->get(
compiler::FunctionTableCodeOffset(static_cast<int>(entry_index))));
} else {
- // Check signature.
- std::vector<GlobalHandleAddress>& fun_tables =
- compiled_module->GetNativeModule()->function_tables();
- if (table_index >= fun_tables.size()) {
+ // The function table is stored in the wasm context.
+ // TODO(wasm): the wasm interpreter currently supports only one table.
+ CHECK_EQ(0, table_index);
+ // Bounds check against table size.
+ if (entry_index >= wasm_context_->table_size) {
return {ExternalCallResult::INVALID_FUNC};
}
- // Reconstitute the global handle to the function table, from the
- // address stored in the respective table of tables.
- FixedArray* fun_table =
- *reinterpret_cast<FixedArray**>(fun_tables[table_index]);
- // Function tables store <smi, code> pairs.
- int num_funcs_in_table =
- fun_table->length() / compiler::kFunctionTableEntrySize;
- if (entry_index >= static_cast<uint32_t>(num_funcs_in_table)) {
- return {ExternalCallResult::INVALID_FUNC};
- }
- int found_sig = Smi::ToInt(fun_table->get(
- compiler::FunctionTableSigOffset(static_cast<int>(entry_index))));
- if (static_cast<uint32_t>(found_sig) != canonical_sig_index) {
+ // Signature check.
+ int32_t entry_sig = wasm_context_->table[entry_index].sig_id;
+ if (entry_sig != static_cast<int32_t>(canonical_sig_index)) {
return {ExternalCallResult::SIGNATURE_MISMATCH};
}
-
+ // Load the target address (first instruction of code).
+ Address first_instr = wasm_context_->table[entry_index].target;
+ // TODO(titzer): load the wasm context instead of relying on the
+ // target code being specialized to the target instance.
// Get code object.
- Address first_instr =
- Foreign::cast(fun_table->get(compiler::FunctionTableCodeOffset(
- static_cast<int>(entry_index))))
- ->foreign_address();
target =
isolate->wasm_engine()->code_manager()->GetCodeFromStartAddress(
first_instr);
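
The new else-branch above dispatches indirect calls through the context-owned table of {sig_id, target} pairs: bounds-check the entry index against table_size, compare the canonicalized signature id, then jump to the stored target address. Below is a self-contained sketch of that check sequence; ToyTableEntry and CheckIndirectCall are illustrative stand-ins for the V8 structures.

#include <cstdint>
#include <iostream>
#include <vector>

struct ToyTableEntry {
  int32_t sig_id;    // canonicalized signature id
  uintptr_t target;  // address of the callee's first instruction
};

enum class CallResult { kOk, kInvalidFunc, kSignatureMismatch };

CallResult CheckIndirectCall(const std::vector<ToyTableEntry>& table,
                             uint32_t entry_index, int32_t expected_sig,
                             uintptr_t* target_out) {
  // Bounds check against the table size.
  if (entry_index >= table.size()) return CallResult::kInvalidFunc;
  // Signature check against the canonicalized signature id.
  if (table[entry_index].sig_id != expected_sig)
    return CallResult::kSignatureMismatch;
  // Load the target address (first instruction of the callee).
  *target_out = table[entry_index].target;
  return CallResult::kOk;
}

int main() {
  std::vector<ToyTableEntry> table = {{7, 0x1000}, {9, 0x2000}};
  uintptr_t target = 0;
  if (CheckIndirectCall(table, 1, 9, &target) == CallResult::kOk) {
    std::cout << "dispatch to 0x" << std::hex << target << "\n";
  }
  return 0;
}
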
@@ -2897,6 +2974,10 @@ void WasmInterpreter::SetFunctionCodeForTesting(const WasmFunction* function,
internals_->codemap_.SetFunctionCode(function, start, end);
}
+void WasmInterpreter::SetCallIndirectTestMode() {
+ internals_->codemap_.set_call_indirect_through_module(true);
+}
+
ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
Zone* zone, const WasmModule* module, const byte* start, const byte* end) {
// Create some dummy structures, to avoid special-casing the implementation
diff --git a/deps/v8/src/wasm/wasm-interpreter.h b/deps/v8/src/wasm/wasm-interpreter.h
index b0c100b5a9..88d21c37d1 100644
--- a/deps/v8/src/wasm/wasm-interpreter.h
+++ b/deps/v8/src/wasm/wasm-interpreter.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_INTERPRETER_H_
-#define V8_WASM_INTERPRETER_H_
+#ifndef V8_WASM_WASM_INTERPRETER_H_
+#define V8_WASM_WASM_INTERPRETER_H_
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-value.h"
@@ -215,6 +215,7 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
// Manually adds code to the interpreter for the given function.
void SetFunctionCodeForTesting(const WasmFunction* function,
const byte* start, const byte* end);
+ void SetCallIndirectTestMode();
// Computes the control transfers for the given bytecode. Used internally in
// the interpreter, but exposed for testing.
@@ -230,4 +231,4 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
} // namespace internal
} // namespace v8
-#endif // V8_WASM_INTERPRETER_H_
+#endif // V8_WASM_WASM_INTERPRETER_H_
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index ce2bf42455..dc1f690a63 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -16,8 +16,6 @@
#include "src/objects.h"
#include "src/parsing/parse-info.h"
#include "src/trap-handler/trap-handler.h"
-#include "src/wasm/module-compiler.h"
-#include "src/wasm/wasm-api.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-memory.h"
@@ -40,6 +38,35 @@ namespace {
} \
} while (false)
+// Like an ErrorThrower, but turns all pending exceptions into scheduled
+// exceptions when going out of scope. Use this in API methods.
+// Note that pending exceptions are not necessarily created by the ErrorThrower,
+// but e.g. by the wasm start function. There might also be a scheduled
+// exception, created by another API call (e.g. v8::Object::Get). But there
+// should never be both pending and scheduled exceptions.
+class ScheduledErrorThrower : public ErrorThrower {
+ public:
+ ScheduledErrorThrower(i::Isolate* isolate, const char* context)
+ : ErrorThrower(isolate, context) {}
+
+ ~ScheduledErrorThrower();
+};
+
+ScheduledErrorThrower::~ScheduledErrorThrower() {
+ // There should never be both a pending and a scheduled exception.
+ DCHECK(!isolate()->has_scheduled_exception() ||
+ !isolate()->has_pending_exception());
+ // Don't throw another error if there is already a scheduled error.
+ if (isolate()->has_scheduled_exception()) {
+ Reset();
+ } else if (isolate()->has_pending_exception()) {
+ Reset();
+ isolate()->OptionalRescheduleException(false);
+ } else if (error()) {
+ isolate()->ScheduleThrow(*Reify());
+ }
+}
+
i::Handle<i::String> v8_str(i::Isolate* isolate, const char* str) {
return isolate->factory()->NewStringFromAsciiChecked(str);
}
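
The ScheduledErrorThrower destructor added above encodes a precedence order: an already-scheduled exception wins, a pending exception is rescheduled, and only if neither exists is the thrower's own error scheduled. The toy model below mirrors that decision order; ToyIsolate and ToyThrower are stand-ins for illustration, not V8 classes.

#include <cassert>
#include <string>

struct ToyIsolate {
  bool has_scheduled = false;
  bool has_pending = false;
  std::string scheduled_message;
};

class ToyThrower {
 public:
  ToyThrower(ToyIsolate* isolate, std::string context)
      : isolate_(isolate), context_(std::move(context)) {}

  void CompileError(std::string message) { error_ = std::move(message); }

  ~ToyThrower() {
    // There should never be both a pending and a scheduled exception.
    assert(!(isolate_->has_scheduled && isolate_->has_pending));
    if (isolate_->has_scheduled) {
      // Keep the already-scheduled exception; drop our own error.
    } else if (isolate_->has_pending) {
      // Turn the pending exception into a scheduled one.
      isolate_->has_pending = false;
      isolate_->has_scheduled = true;
    } else if (!error_.empty()) {
      // Schedule the thrower's own error.
      isolate_->has_scheduled = true;
      isolate_->scheduled_message = context_ + ": " + error_;
    }
  }

 private:
  ToyIsolate* isolate_;
  std::string context_;
  std::string error_;
};

int main() {
  ToyIsolate isolate;
  {
    ToyThrower thrower(&isolate, "WebAssembly.compile()");
    thrower.CompileError("bad module");
  }
  assert(isolate.has_scheduled);
  return 0;
}
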
@@ -123,8 +150,7 @@ void WebAssemblyCompileStreaming(
ASSIGN(Promise::Resolver, resolver, Promise::Resolver::New(context));
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(resolver->GetPromise());
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly.compileStreaming()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.compileStreaming()");
thrower.CompileError("Wasm code generation disallowed by embedder");
auto maybe = resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
CHECK_IMPLIES(!maybe.FromMaybe(false),
@@ -144,7 +170,7 @@ void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
MicrotasksScope runs_microtasks(isolate, MicrotasksScope::kRunMicrotasks);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.compile()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.compile()");
if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) {
thrower.CompileError("Wasm code generation disallowed by embedder");
@@ -165,7 +191,7 @@ void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
i::Handle<i::JSPromise> promise = Utils::OpenHandle(*resolver->GetPromise());
// Asynchronous compilation handles copying wire bytes if necessary.
- i::wasm::AsyncCompile(i_isolate, promise, bytes, is_shared);
+ i_isolate->wasm_engine()->AsyncCompile(i_isolate, promise, bytes, is_shared);
}
// WebAssembly.validate(bytes) -> bool
@@ -173,7 +199,7 @@ void WebAssemblyValidate(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.validate()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.validate()");
bool is_shared = false;
auto bytes = GetFirstArgumentAsBytes(args, &thrower, &is_shared);
@@ -209,7 +235,7 @@ void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (i_isolate->wasm_module_callback()(args)) return;
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module()");
if (!args.IsConstructCall()) {
thrower.TypeError("WebAssembly.Module must be invoked with 'new'");
@@ -233,10 +259,12 @@ void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
memcpy(copy.get(), bytes.start(), bytes.length());
i::wasm::ModuleWireBytes bytes_copy(copy.get(),
copy.get() + bytes.length());
- module_obj = i::wasm::SyncCompile(i_isolate, &thrower, bytes_copy);
+ module_obj =
+ i_isolate->wasm_engine()->SyncCompile(i_isolate, &thrower, bytes_copy);
} else {
// The wire bytes are not shared, OK to use them directly.
- module_obj = i::wasm::SyncCompile(i_isolate, &thrower, bytes);
+ module_obj =
+ i_isolate->wasm_engine()->SyncCompile(i_isolate, &thrower, bytes);
}
if (module_obj.is_null()) return;
@@ -250,8 +278,7 @@ void WebAssemblyModuleImports(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(args.GetIsolate());
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly.Module.imports()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module.imports()");
auto maybe_module = GetFirstArgumentAsModule(args, &thrower);
if (thrower.error()) return;
@@ -264,8 +291,7 @@ void WebAssemblyModuleExports(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(args.GetIsolate());
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly.Module.exports()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module.exports()");
auto maybe_module = GetFirstArgumentAsModule(args, &thrower);
if (thrower.error()) return;
@@ -279,8 +305,8 @@ void WebAssemblyModuleCustomSections(
HandleScope scope(args.GetIsolate());
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly.Module.customSections()");
+ ScheduledErrorThrower thrower(i_isolate,
+ "WebAssembly.Module.customSections()");
auto maybe_module = GetFirstArgumentAsModule(args, &thrower);
if (thrower.error()) return;
@@ -303,8 +329,7 @@ MaybeLocal<Value> WebAssemblyInstantiateImpl(Isolate* isolate,
i::MaybeHandle<i::Object> instance_object;
{
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly Instantiation");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly Instantiation");
i::MaybeHandle<i::JSReceiver> maybe_imports =
GetValueAsImports(ffi, &thrower);
if (thrower.error()) return {};
@@ -312,9 +337,9 @@ MaybeLocal<Value> WebAssemblyInstantiateImpl(Isolate* isolate,
i::Handle<i::WasmModuleObject> module_obj =
i::Handle<i::WasmModuleObject>::cast(
Utils::OpenHandle(Object::Cast(*module)));
- instance_object =
- i::wasm::SyncInstantiate(i_isolate, &thrower, module_obj, maybe_imports,
- i::MaybeHandle<i::JSArrayBuffer>());
+ instance_object = i_isolate->wasm_engine()->SyncInstantiate(
+ i_isolate, &thrower, module_obj, maybe_imports,
+ i::MaybeHandle<i::JSArrayBuffer>());
}
DCHECK_EQ(instance_object.is_null(), i_isolate->has_scheduled_exception());
@@ -386,7 +411,7 @@ void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(args.GetIsolate());
if (i_isolate->wasm_instance_callback()(args)) return;
- i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Instance()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Instance()");
if (!args.IsConstructCall()) {
thrower.TypeError("WebAssembly.Instance must be invoked with 'new'");
return;
@@ -444,8 +469,7 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate::UseCounterFeature::kWebAssemblyInstantiation);
MicrotasksScope runs_microtasks(isolate, MicrotasksScope::kRunMicrotasks);
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly.instantiate()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.instantiate()");
HandleScope scope(isolate);
@@ -521,7 +545,7 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module()");
if (!args.IsConstructCall()) {
thrower.TypeError("WebAssembly.Table must be invoked with 'new'");
return;
@@ -578,7 +602,7 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Memory()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Memory()");
if (!args.IsConstructCall()) {
thrower.TypeError("WebAssembly.Memory must be invoked with 'new'");
return;
@@ -672,8 +696,7 @@ void WebAssemblyInstanceGetExports(
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly.Instance.exports()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Instance.exports()");
EXTRACT_THIS(receiver, WasmInstanceObject);
i::Handle<i::JSObject> exports_object(receiver->exports_object());
args.GetReturnValue().Set(Utils::ToLocal(exports_object));
@@ -684,8 +707,7 @@ void WebAssemblyTableGetLength(
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly.Table.length()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.length()");
EXTRACT_THIS(receiver, WasmTableObject);
args.GetReturnValue().Set(
v8::Number::New(isolate, receiver->current_length()));
@@ -696,7 +718,7 @@ void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.grow()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.grow()");
Local<Context> context = isolate->GetCurrentContext();
EXTRACT_THIS(receiver, WasmTableObject);
@@ -738,7 +760,7 @@ void WebAssemblyTableGet(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.get()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.get()");
Local<Context> context = isolate->GetCurrentContext();
EXTRACT_THIS(receiver, WasmTableObject);
i::Handle<i::FixedArray> array(receiver->functions(), i_isolate);
@@ -759,7 +781,7 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.set()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.set()");
Local<Context> context = isolate->GetCurrentContext();
EXTRACT_THIS(receiver, WasmTableObject);
@@ -807,8 +829,7 @@ void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly.Memory.grow()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Memory.grow()");
Local<Context> context = isolate->GetCurrentContext();
EXTRACT_THIS(receiver, WasmMemoryObject);
@@ -826,7 +847,7 @@ void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
uint32_t old_size =
- old_buffer->byte_length()->Number() / i::wasm::kSpecMaxWasmMemoryPages;
+ old_buffer->byte_length()->Number() / i::wasm::kWasmPageSize;
int64_t new_size64 = old_size + delta_size;
if (delta_size < 0 || max_size64 < new_size64 || new_size64 < old_size) {
thrower.RangeError(new_size64 < old_size ? "trying to shrink memory"
@@ -849,8 +870,7 @@ void WebAssemblyMemoryGetBuffer(
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly.Memory.buffer");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Memory.buffer");
EXTRACT_THIS(receiver, WasmMemoryObject);
i::Handle<i::Object> buffer_obj(receiver->array_buffer(), i_isolate);
@@ -931,7 +951,6 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
name, isolate->strict_function_map(), LanguageMode::kStrict);
Handle<JSFunction> cons = factory->NewFunction(args);
JSFunction::SetPrototype(cons, isolate->initial_object_prototype());
- cons->shared()->set_instance_class_name(*name);
Handle<JSObject> webassembly = factory->NewJSObject(cons, TENURED);
PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
diff --git a/deps/v8/src/wasm/wasm-js.h b/deps/v8/src/wasm/wasm-js.h
index 926bd7647a..bdcc1f061e 100644
--- a/deps/v8/src/wasm/wasm-js.h
+++ b/deps/v8/src/wasm/wasm-js.h
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_JS_H_
-#define V8_WASM_JS_H_
+#ifndef V8_WASM_WASM_JS_H_
+#define V8_WASM_WASM_JS_H_
-#include "src/allocation.h"
-#include "src/base/hashmap.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -16,14 +15,9 @@ class WasmJs {
public:
V8_EXPORT_PRIVATE static void Install(Isolate* isolate,
bool exposed_on_global_object);
-
- // WebAssembly.Table.
- static bool IsWasmTableObject(Isolate* isolate, Handle<Object> value);
-
- // WebAssembly.Memory
- static bool IsWasmMemoryObject(Isolate* isolate, Handle<Object> value);
};
} // namespace internal
} // namespace v8
-#endif
+
+#endif // V8_WASM_WASM_JS_H_
diff --git a/deps/v8/src/wasm/wasm-limits.h b/deps/v8/src/wasm/wasm-limits.h
index 184b6329ba..c1011c3f89 100644
--- a/deps/v8/src/wasm/wasm-limits.h
+++ b/deps/v8/src/wasm/wasm-limits.h
@@ -48,6 +48,8 @@ static_assert(kV8MaxWasmMemoryPages <= kSpecMaxWasmMemoryPages,
constexpr size_t kSpecMaxWasmTableSize = 0xFFFFFFFFu;
constexpr size_t kV8MaxWasmMemoryBytes = kV8MaxWasmMemoryPages * kWasmPageSize;
+static_assert(kV8MaxWasmMemoryBytes <= std::numeric_limits<int32_t>::max(),
+ "max memory bytes should fit in int32_t");
constexpr uint64_t kWasmMaxHeapOffset =
static_cast<uint64_t>(
diff --git a/deps/v8/src/wasm/wasm-memory.cc b/deps/v8/src/wasm/wasm-memory.cc
index fcbe60ae0e..38cd8973a6 100644
--- a/deps/v8/src/wasm/wasm-memory.cc
+++ b/deps/v8/src/wasm/wasm-memory.cc
@@ -24,6 +24,9 @@ bool WasmAllocationTracker::ReserveAddressSpace(size_t num_bytes) {
// platforms, we always fall back on bounds checks.
#if V8_TARGET_ARCH_64_BIT
static constexpr size_t kAddressSpaceLimit = 0x10000000000L; // 1 TiB
+#else
+ static constexpr size_t kAddressSpaceLimit = 0x80000000; // 2 GiB
+#endif
size_t const old_count = allocated_address_space_.fetch_add(num_bytes);
DCHECK_GE(old_count + num_bytes, old_count);
@@ -31,7 +34,6 @@ bool WasmAllocationTracker::ReserveAddressSpace(size_t num_bytes) {
return true;
}
allocated_address_space_ -= num_bytes;
-#endif
return false;
}
@@ -44,59 +46,42 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
bool require_guard_regions,
void** allocation_base,
size_t* allocation_length) {
- // TODO(eholk): Right now require_guard_regions has no effect on 32-bit
- // systems. It may be safer to fail instead, given that other code might do
- // things that would be unsafe if they expected guard pages where there
- // weren't any.
- if (require_guard_regions) {
- // TODO(eholk): On Windows we want to make sure we don't commit the guard
- // pages yet.
-
- // We always allocate the largest possible offset into the heap, so the
- // addressable memory after the guard page can be made inaccessible.
- *allocation_length = RoundUp(kWasmMaxHeapOffset, CommitPageSize());
- DCHECK_EQ(0, size % CommitPageSize());
-
- WasmAllocationTracker* const allocation_tracker =
- isolate->wasm_engine()->allocation_tracker();
-
- // Let the WasmAllocationTracker know we are going to reserve a bunch of
- // address space.
- if (!allocation_tracker->ReserveAddressSpace(*allocation_length)) {
- // If we are over the address space limit, fail.
- return nullptr;
- }
-
- // The Reserve makes the whole region inaccessible by default.
- *allocation_base =
- isolate->array_buffer_allocator()->Reserve(*allocation_length);
- if (*allocation_base == nullptr) {
- allocation_tracker->ReleaseAddressSpace(*allocation_length);
- return nullptr;
- }
+ // We always allocate the largest possible offset into the heap, so the
+ // addressable memory after the guard page can be made inaccessible.
+ *allocation_length = require_guard_regions
+ ? RoundUp(kWasmMaxHeapOffset, CommitPageSize())
+ : base::bits::RoundUpToPowerOfTwo32(RoundUp(
+ static_cast<uint32_t>(size), kWasmPageSize));
+ DCHECK_GE(*allocation_length, size);
+
+ WasmAllocationTracker* const allocation_tracker =
+ isolate->wasm_engine()->allocation_tracker();
+
+ // Let the WasmAllocationTracker know we are going to reserve a bunch of
+ // address space.
+ if (!allocation_tracker->ReserveAddressSpace(*allocation_length)) {
+ // If we are over the address space limit, fail.
+ return nullptr;
+ }
- void* memory = *allocation_base;
-
- // Make the part we care about accessible.
- isolate->array_buffer_allocator()->SetProtection(
- memory, size, v8::ArrayBuffer::Allocator::Protection::kReadWrite);
-
- reinterpret_cast<v8::Isolate*>(isolate)
- ->AdjustAmountOfExternalAllocatedMemory(size);
-
- return memory;
- } else {
- // TODO(titzer): use guard regions for minicage and merge with above code.
- CHECK_LE(size, kV8MaxWasmMemoryBytes);
- *allocation_length =
- base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(size));
- void* memory =
- size == 0
- ? nullptr
- : isolate->array_buffer_allocator()->Allocate(*allocation_length);
- *allocation_base = memory;
- return memory;
+ // The Reserve makes the whole region inaccessible by default.
+ *allocation_base = AllocatePages(nullptr, *allocation_length, kWasmPageSize,
+ PageAllocator::kNoAccess);
+ if (*allocation_base == nullptr) {
+ allocation_tracker->ReleaseAddressSpace(*allocation_length);
+ return nullptr;
}
+
+ void* memory = *allocation_base;
+
+ // Make the part we care about accessible.
+ CHECK(SetPermissions(memory, RoundUp(size, kWasmPageSize),
+ PageAllocator::kReadWrite));
+
+ reinterpret_cast<v8::Isolate*>(isolate)
+ ->AdjustAmountOfExternalAllocatedMemory(size);
+
+ return memory;
}
Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* allocation_base,
@@ -150,8 +135,10 @@ Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
#endif
constexpr bool is_external = false;
+ // All buffers have guard regions now, but sometimes they are small.
+ constexpr bool has_guard_region = true;
return SetupArrayBuffer(isolate, allocation_base, allocation_length, memory,
- size, is_external, require_guard_regions, shared);
+ size, is_external, has_guard_region, shared);
}
void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
diff --git a/deps/v8/src/wasm/wasm-memory.h b/deps/v8/src/wasm/wasm-memory.h
index c5d6ef5154..438014b417 100644
--- a/deps/v8/src/wasm/wasm-memory.h
+++ b/deps/v8/src/wasm/wasm-memory.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_MEMORY_H_
-#define V8_WASM_MEMORY_H_
+#ifndef V8_WASM_WASM_MEMORY_H_
+#define V8_WASM_WASM_MEMORY_H_
#include "src/flags.h"
#include "src/handles.h"
@@ -49,4 +49,4 @@ void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
} // namespace internal
} // namespace v8
-#endif // V8_WASM_MODULE_H_
+#endif // V8_WASM_WASM_MEMORY_H_
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index b6b9117ae5..909b62a16f 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -18,7 +18,6 @@
#include "src/trap-handler/trap-handler.h"
#include "src/v8.h"
#include "src/wasm/compilation-manager.h"
-#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-code-specialization.h"
@@ -157,7 +156,7 @@ WasmFunction* GetWasmFunctionForExport(Isolate* isolate,
Handle<Object> GetOrCreateIndirectCallWrapper(
Isolate* isolate, Handle<WasmInstanceObject> owning_instance,
- WasmCodeWrapper wasm_code, uint32_t index, FunctionSig* sig) {
+ WasmCodeWrapper wasm_code, uint32_t func_index, FunctionSig* sig) {
Address new_context_address =
reinterpret_cast<Address>(owning_instance->wasm_context()->get());
if (!wasm_code.IsCodeObject()) {
@@ -173,6 +172,8 @@ Handle<Object> GetOrCreateIndirectCallWrapper(
wasm::WasmCode* exported_wrapper =
native_module->GetExportedWrapper(wasm_code.GetWasmCode()->index());
if (exported_wrapper == nullptr) {
+ wasm::NativeModuleModificationScope native_modification_scope(
+ native_module);
Handle<Code> new_wrapper = compiler::CompileWasmToWasmWrapper(
isolate, wasm_code, sig, new_context_address);
exported_wrapper = native_module->AddExportedWrapper(
@@ -181,10 +182,11 @@ Handle<Object> GetOrCreateIndirectCallWrapper(
Address target = exported_wrapper->instructions().start();
return isolate->factory()->NewForeign(target, TENURED);
}
+ CodeSpaceMemoryModificationScope gc_modification_scope(isolate->heap());
Handle<Code> code = compiler::CompileWasmToWasmWrapper(
isolate, wasm_code, sig, new_context_address);
AttachWasmFunctionInfo(isolate, code, owning_instance,
- static_cast<int>(index));
+ static_cast<int>(func_index));
return code;
}
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index 492c51487f..405b5f3ff4 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_MODULE_H_
-#define V8_WASM_MODULE_H_
+#ifndef V8_WASM_WASM_MODULE_H_
+#define V8_WASM_WASM_MODULE_H_
#include <memory>
@@ -275,7 +275,7 @@ WasmFunction* GetWasmFunctionForExport(Isolate* isolate, Handle<Object> target);
Handle<Object> GetOrCreateIndirectCallWrapper(
Isolate* isolate, Handle<WasmInstanceObject> owning_instance,
- WasmCodeWrapper wasm_code, uint32_t index, FunctionSig* sig);
+ WasmCodeWrapper wasm_code, uint32_t func_index, FunctionSig* sig);
void UnpackAndRegisterProtectedInstructionsGC(Isolate* isolate,
Handle<FixedArray> code_table);
@@ -323,4 +323,4 @@ class TruncatedUserString {
} // namespace internal
} // namespace v8
-#endif // V8_WASM_MODULE_H_
+#endif // V8_WASM_WASM_MODULE_H_
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index 0a85862174..4891ad671a 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_OBJECTS_INL_H_
-#define V8_WASM_OBJECTS_INL_H_
+#ifndef V8_WASM_WASM_OBJECTS_INL_H_
+#define V8_WASM_WASM_OBJECTS_INL_H_
#include "src/heap/heap-inl.h"
#include "src/wasm/wasm-objects.h"
@@ -158,4 +158,4 @@ void WasmCompiledModule::ReplaceCodeTableForTesting(
} // namespace internal
} // namespace v8
-#endif // V8_WASM_OBJECTS_INL_H_
+#endif // V8_WASM_WASM_OBJECTS_INL_H_
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index c92a51716a..f06f3240f0 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -248,11 +248,44 @@ void WasmTableObject::AddDispatchTable(Isolate* isolate,
}
void WasmTableObject::Grow(Isolate* isolate, uint32_t count) {
- // TODO(6792): No longer needed once WebAssembly code is off heap.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+ if (count == 0) return; // Degenerate case: nothing to do.
+
Handle<FixedArray> dispatch_tables(this->dispatch_tables());
DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
uint32_t old_size = functions()->length();
+ constexpr int kInvalidSigIndex = -1;
+
+ if (WASM_CONTEXT_TABLES) {
+ // If tables are stored in the WASM context, no code patching is
+ // necessary. We simply have to grow the raw tables in the WasmContext
+ // for each instance that has imported this table.
+
+ // TODO(titzer): replace the dispatch table with a weak list of all
+ // the instances that import a given table.
+ for (int i = 0; i < dispatch_tables->length();
+ i += kDispatchTableNumElements) {
+ // TODO(titzer): potentially racy update of WasmContext::table
+ WasmContext* wasm_context =
+ WasmInstanceObject::cast(dispatch_tables->get(i))
+ ->wasm_context()
+ ->get();
+ DCHECK_EQ(old_size, wasm_context->table_size);
+ uint32_t new_size = old_size + count;
+ wasm_context->table = reinterpret_cast<IndirectFunctionTableEntry*>(
+ realloc(wasm_context->table,
+ new_size * sizeof(IndirectFunctionTableEntry)));
+ for (uint32_t j = old_size; j < new_size; j++) {
+ wasm_context->table[j].sig_id = kInvalidSigIndex;
+ wasm_context->table[j].context = nullptr;
+ wasm_context->table[j].target = nullptr;
+ }
+ wasm_context->table_size = new_size;
+ }
+ return;
+ }
+
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
Zone specialization_zone(isolate->allocator(), ZONE_NAME);
for (int i = 0; i < dispatch_tables->length();
@@ -272,24 +305,7 @@ void WasmTableObject::Grow(Isolate* isolate, uint32_t count) {
*new_function_table);
// Patch the code of the respective instance.
- if (FLAG_wasm_jit_to_native) {
- DisallowHeapAllocation no_gc;
- wasm::CodeSpecialization code_specialization(isolate,
- &specialization_zone);
- WasmInstanceObject* instance =
- WasmInstanceObject::cast(dispatch_tables->get(i));
- WasmCompiledModule* compiled_module = instance->compiled_module();
- wasm::NativeModule* native_module = compiled_module->GetNativeModule();
- wasm::NativeModuleModificationScope native_module_modification_scope(
- native_module);
- GlobalHandleAddress old_function_table_addr =
- native_module->function_tables()[table_index];
- code_specialization.PatchTableSize(old_size, old_size + count);
- code_specialization.RelocatePointer(old_function_table_addr,
- new_function_table_addr);
- code_specialization.ApplyToWholeInstance(instance);
- native_module->function_tables()[table_index] = new_function_table_addr;
- } else {
+ if (!WASM_CONTEXT_TABLES) {
DisallowHeapAllocation no_gc;
wasm::CodeSpecialization code_specialization(isolate,
&specialization_zone);
@@ -311,70 +327,104 @@ void WasmTableObject::Grow(Isolate* isolate, uint32_t count) {
}
void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
- int32_t index, Handle<JSFunction> function) {
+ int32_t table_index, Handle<JSFunction> function) {
Handle<FixedArray> array(table->functions(), isolate);
+ if (function.is_null()) {
+ ClearDispatchTables(table, table_index); // Degenerate case of null value.
+ array->set(table_index, isolate->heap()->null_value());
+ return;
+ }
+
+ // TODO(titzer): Change this to MaybeHandle<WasmExportedFunction>
+ auto exported_function = Handle<WasmExportedFunction>::cast(function);
+ auto* wasm_function = wasm::GetWasmFunctionForExport(isolate, function);
+ DCHECK_NOT_NULL(wasm_function);
+ DCHECK_NOT_NULL(wasm_function->sig);
+ WasmCodeWrapper wasm_code = exported_function->GetWasmCode();
+ UpdateDispatchTables(isolate, table, table_index, wasm_function->sig,
+ handle(exported_function->instance()), wasm_code,
+ exported_function->function_index());
+ array->set(table_index, *function);
+}
+
+void WasmTableObject::UpdateDispatchTables(
+ Isolate* isolate, Handle<WasmTableObject> table, int table_index,
+ wasm::FunctionSig* sig, Handle<WasmInstanceObject> from_instance,
+ WasmCodeWrapper wasm_code, int func_index) {
+ if (WASM_CONTEXT_TABLES) {
+ // We simply need to update the WASM contexts for each instance
+ // that imports this table.
+ DisallowHeapAllocation no_gc;
+ FixedArray* dispatch_tables = table->dispatch_tables();
+ DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
- Handle<FixedArray> dispatch_tables(table->dispatch_tables(), isolate);
-
- wasm::FunctionSig* sig = nullptr;
- Handle<Object> code = Handle<Object>::null();
- Handle<Object> value = isolate->factory()->null_value();
-
- if (!function.is_null()) {
- auto exported_function = Handle<WasmExportedFunction>::cast(function);
- auto* wasm_function = wasm::GetWasmFunctionForExport(isolate, function);
- // The verification that {function} is an export was done
- // by the caller.
- DCHECK(wasm_function != nullptr && wasm_function->sig != nullptr);
- sig = wasm_function->sig;
- value = function;
- // TODO(titzer): Make JSToWasm wrappers just call the WASM to WASM wrapper,
- // and then we can just reuse the WASM to WASM wrapper.
- WasmCodeWrapper wasm_code = exported_function->GetWasmCode();
- wasm::NativeModule* native_module =
- wasm_code.IsCodeObject() ? nullptr : wasm_code.GetWasmCode()->owner();
- CodeSpaceMemoryModificationScope gc_modification_scope(isolate->heap());
- wasm::NativeModuleModificationScope native_modification_scope(
- native_module);
- code = wasm::GetOrCreateIndirectCallWrapper(
- isolate, handle(exported_function->instance()), wasm_code,
- exported_function->function_index(), sig);
- }
- UpdateDispatchTables(table, index, sig, code);
- array->set(index, *value);
-}
-
-void WasmTableObject::UpdateDispatchTables(Handle<WasmTableObject> table,
- int index, wasm::FunctionSig* sig,
- Handle<Object> code_or_foreign) {
+ for (int i = 0; i < dispatch_tables->length();
+ i += kDispatchTableNumElements) {
+ // Note that {SignatureMap::Find} may return {-1} if the signature is
+ // not found; it will simply never match any check.
+ WasmInstanceObject* to_instance = WasmInstanceObject::cast(
+ dispatch_tables->get(i + kDispatchTableInstanceOffset));
+ auto sig_id = to_instance->module()->signature_map.Find(sig);
+ auto& entry = to_instance->wasm_context()->get()->table[table_index];
+ entry.sig_id = sig_id;
+ entry.context = from_instance->wasm_context()->get();
+ entry.target = wasm_code.instructions().start();
+ }
+ } else {
+ // We may need to compile a new WASM->WASM wrapper for this.
+ Handle<Object> code_or_foreign = wasm::GetOrCreateIndirectCallWrapper(
+ isolate, from_instance, wasm_code, func_index, sig);
+
+ DisallowHeapAllocation no_gc;
+ FixedArray* dispatch_tables = table->dispatch_tables();
+ DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
+
+ for (int i = 0; i < dispatch_tables->length();
+ i += kDispatchTableNumElements) {
+ // Note that {SignatureMap::Find} may return {-1} if the signature is
+ // not found; it will simply never match any check.
+ WasmInstanceObject* to_instance = WasmInstanceObject::cast(
+ dispatch_tables->get(i + kDispatchTableInstanceOffset));
+ auto sig_id = to_instance->module()->signature_map.Find(sig);
+
+ FixedArray* function_table = FixedArray::cast(
+ dispatch_tables->get(i + kDispatchTableFunctionTableOffset));
+
+ function_table->set(compiler::FunctionTableSigOffset(table_index),
+ Smi::FromInt(sig_id));
+ function_table->set(compiler::FunctionTableCodeOffset(table_index),
+ *code_or_foreign);
+ }
+ }
+}
+
+void WasmTableObject::ClearDispatchTables(Handle<WasmTableObject> table,
+ int index) {
DisallowHeapAllocation no_gc;
FixedArray* dispatch_tables = table->dispatch_tables();
DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
for (int i = 0; i < dispatch_tables->length();
i += kDispatchTableNumElements) {
- FixedArray* function_table = FixedArray::cast(
- dispatch_tables->get(i + kDispatchTableFunctionTableOffset));
- Smi* sig_smi = Smi::FromInt(-1);
- Object* code = Smi::kZero;
- if (sig) {
- DCHECK(code_or_foreign->IsCode() || code_or_foreign->IsForeign());
- WasmInstanceObject* instance = WasmInstanceObject::cast(
+ if (WASM_CONTEXT_TABLES) {
+ constexpr int kInvalidSigIndex = -1; // TODO(titzer): move to header.
+ WasmInstanceObject* to_instance = WasmInstanceObject::cast(
dispatch_tables->get(i + kDispatchTableInstanceOffset));
- // Note that {SignatureMap::Find} may return {-1} if the signature is
- // not found; it will simply never match any check.
- auto sig_index = instance->module()->signature_map.Find(sig);
- sig_smi = Smi::FromInt(sig_index);
- code = *code_or_foreign;
+ DCHECK_LT(index, to_instance->wasm_context()->get()->table_size);
+ auto& entry = to_instance->wasm_context()->get()->table[index];
+ entry.sig_id = kInvalidSigIndex;
+ entry.context = nullptr;
+ entry.target = nullptr;
} else {
- DCHECK(code_or_foreign.is_null());
+ FixedArray* function_table = FixedArray::cast(
+ dispatch_tables->get(i + kDispatchTableFunctionTableOffset));
+ function_table->set(compiler::FunctionTableSigOffset(index),
+ Smi::FromInt(-1));
+ function_table->set(compiler::FunctionTableCodeOffset(index), Smi::kZero);
}
- function_table->set(compiler::FunctionTableSigOffset(index), sig_smi);
- function_table->set(compiler::FunctionTableCodeOffset(index), code);
}
}
namespace {
-
Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
Handle<JSArrayBuffer> old_buffer,
uint32_t pages, uint32_t maximum_pages,
@@ -393,20 +443,22 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
if (old_pages > maximum_pages || pages > maximum_pages - old_pages) {
return Handle<JSArrayBuffer>::null();
}
- const bool enable_guard_regions =
- old_buffer.is_null() ? use_trap_handler : old_buffer->has_guard_region();
size_t new_size =
static_cast<size_t>(old_pages + pages) * wasm::kWasmPageSize;
if (new_size > FLAG_wasm_max_mem_pages * wasm::kWasmPageSize ||
new_size > kMaxInt) {
return Handle<JSArrayBuffer>::null();
}
- if ((enable_guard_regions || old_size == new_size) && old_size != 0) {
+ // Reusing the backing store from externalized buffers causes problems with
+ // Blink's array buffers. The connection between the two is lost, which can
+ // lead to Blink not knowing about the other reference to the buffer and
+ // freeing it too early.
+ if (!old_buffer->is_external() && old_size != 0 &&
+ ((new_size < old_buffer->allocation_length()) || old_size == new_size)) {
DCHECK_NOT_NULL(old_buffer->backing_store());
if (old_size != new_size) {
- isolate->array_buffer_allocator()->SetProtection(
- old_mem_start, new_size,
- v8::ArrayBuffer::Allocator::Protection::kReadWrite);
+ CHECK(i::SetPermissions(old_mem_start, new_size,
+ PageAllocator::kReadWrite));
reinterpret_cast<v8::Isolate*>(isolate)
->AdjustAmountOfExternalAllocatedMemory(pages * wasm::kWasmPageSize);
}
@@ -426,23 +478,13 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
} else {
bool free_memory = false;
Handle<JSArrayBuffer> new_buffer;
- if (pages != 0) {
- // Allocate a new buffer and memcpy the old contents.
- free_memory = true;
- new_buffer =
- wasm::NewArrayBuffer(isolate, new_size, enable_guard_regions);
- if (new_buffer.is_null() || old_size == 0) return new_buffer;
- Address new_mem_start = static_cast<Address>(new_buffer->backing_store());
- memcpy(new_mem_start, old_mem_start, old_size);
- DCHECK(old_buffer.is_null() || !old_buffer->is_shared());
- DCHECK(old_buffer.is_null() || !old_buffer->has_guard_region());
- } else {
- // Reuse the prior backing store, but allocate a new array buffer.
- new_buffer = wasm::SetupArrayBuffer(
- isolate, old_buffer->allocation_base(),
- old_buffer->allocation_length(), old_buffer->backing_store(),
- new_size, old_buffer->is_external(), old_buffer->has_guard_region());
- }
+ // Allocate a new buffer and memcpy the old contents.
+ free_memory = true;
+ new_buffer = wasm::NewArrayBuffer(isolate, new_size, use_trap_handler);
+ if (new_buffer.is_null() || old_size == 0) return new_buffer;
+ Address new_mem_start = static_cast<Address>(new_buffer->backing_store());
+ memcpy(new_mem_start, old_mem_start, old_size);
+ DCHECK(old_buffer.is_null() || !old_buffer->is_shared());
i::wasm::DetachMemoryBuffer(isolate, old_buffer, free_memory);
return new_buffer;
}
@@ -667,6 +709,91 @@ void WasmInstanceObject::ValidateOrphanedInstanceForTesting(
CHECK(compiled_module->weak_wasm_module()->cleared());
}
+namespace {
+void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
+ DisallowHeapAllocation no_gc;
+ JSObject** p = reinterpret_cast<JSObject**>(data.GetParameter());
+ WasmInstanceObject* owner = reinterpret_cast<WasmInstanceObject*>(*p);
+ Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
+ // If a link to shared memory instances exists, update the list of memory
+ // instances before the instance is destroyed.
+ WasmCompiledModule* compiled_module = owner->compiled_module();
+ wasm::NativeModule* native_module = compiled_module->GetNativeModule();
+ if (FLAG_wasm_jit_to_native) {
+ if (native_module) {
+ TRACE("Finalizing %zu {\n", native_module->instance_id);
+ } else {
+ TRACE("Finalized already cleaned up compiled module\n");
+ }
+ } else {
+ TRACE("Finalizing %d {\n", compiled_module->instance_id());
+
+ if (compiled_module->use_trap_handler()) {
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+ DisallowHeapAllocation no_gc;
+ FixedArray* code_table = compiled_module->code_table();
+ for (int i = 0; i < code_table->length(); ++i) {
+ Code* code = Code::cast(code_table->get(i));
+ int index = code->trap_handler_index()->value();
+ if (index >= 0) {
+ trap_handler::ReleaseHandlerData(index);
+ code->set_trap_handler_index(
+ Smi::FromInt(trap_handler::kInvalidIndex));
+ }
+ }
+ }
+ }
+ WeakCell* weak_wasm_module = compiled_module->weak_wasm_module();
+
+ // Since the order of finalizers is not guaranteed, it can be the case
+ // that {instance->compiled_module()->module()}, which is a
+ // {Managed<WasmModule>} has been collected earlier in this GC cycle.
+ // Weak references to this instance won't be cleared until
+ // the next GC cycle, so we need to manually break some links (such as
+ // the weak references from {WasmMemoryObject::instances}.
+ if (owner->has_memory_object()) {
+ Handle<WasmMemoryObject> memory(owner->memory_object(), isolate);
+ Handle<WasmInstanceObject> instance(owner, isolate);
+ WasmMemoryObject::RemoveInstance(isolate, memory, instance);
+ }
+
+ // weak_wasm_module may have been cleared, meaning the module object
+ // was GC-ed. We still want to maintain the links between instances, to
+ // release the WasmCompiledModule corresponding to the WasmModuleInstance
+ // being finalized here.
+ WasmModuleObject* wasm_module = nullptr;
+ if (!weak_wasm_module->cleared()) {
+ wasm_module = WasmModuleObject::cast(weak_wasm_module->value());
+ WasmCompiledModule* current_template = wasm_module->compiled_module();
+
+ DCHECK(!current_template->has_prev_instance());
+ if (current_template == compiled_module) {
+ if (!compiled_module->has_next_instance()) {
+ WasmCompiledModule::Reset(isolate, compiled_module);
+ } else {
+ WasmModuleObject::cast(wasm_module)
+ ->set_compiled_module(compiled_module->next_instance());
+ }
+ }
+ }
+
+ compiled_module->RemoveFromChain();
+
+ compiled_module->reset_weak_owning_instance();
+ GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
+ TRACE("}\n");
+}
+
+} // namespace
+
+void WasmInstanceObject::InstallFinalizer(Isolate* isolate,
+ Handle<WasmInstanceObject> instance) {
+ Handle<Object> global_handle = isolate->global_handles()->Create(*instance);
+ GlobalHandles::MakeWeak(global_handle.location(), global_handle.location(),
+ InstanceFinalizer, v8::WeakCallbackType::kFinalizer);
+}
+
bool WasmExportedFunction::IsWasmExportedFunction(Object* object) {
if (!object->IsJSFunction()) return false;
Handle<JSFunction> js_function(JSFunction::cast(object));
@@ -721,8 +848,11 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
shared->set_length(arity);
shared->set_internal_formal_parameter_count(arity);
NewFunctionArgs args = NewFunctionArgs::ForWasm(
- name, export_wrapper, isolate->sloppy_function_map());
+ name, export_wrapper, isolate->sloppy_function_without_prototype_map());
Handle<JSFunction> js_function = isolate->factory()->NewFunction(args);
+ // According to the spec, exported functions should not have a [[Construct]]
+ // method.
+ DCHECK(!js_function->IsConstructor());
js_function->set_shared(*shared);
Handle<Symbol> instance_symbol(isolate->factory()->wasm_instance_symbol());
@@ -917,7 +1047,7 @@ int FindBreakpointInfoInsertPos(Isolate* isolate,
void WasmSharedModuleData::AddBreakpoint(Handle<WasmSharedModuleData> shared,
int position,
- Handle<Object> break_point_object) {
+ Handle<BreakPoint> break_point) {
Isolate* isolate = shared->GetIsolate();
Handle<FixedArray> breakpoint_infos;
if (shared->has_breakpoint_infos()) {
@@ -937,7 +1067,7 @@ void WasmSharedModuleData::AddBreakpoint(Handle<WasmSharedModuleData> shared,
position) {
Handle<BreakPointInfo> old_info(
BreakPointInfo::cast(breakpoint_infos->get(insert_pos)), isolate);
- BreakPointInfo::SetBreakPoint(old_info, break_point_object);
+ BreakPointInfo::SetBreakPoint(old_info, break_point);
return;
}
@@ -964,7 +1094,7 @@ void WasmSharedModuleData::AddBreakpoint(Handle<WasmSharedModuleData> shared,
// Generate new BreakpointInfo.
Handle<BreakPointInfo> breakpoint_info =
isolate->factory()->NewBreakPointInfo(position);
- BreakPointInfo::SetBreakPoint(breakpoint_info, break_point_object);
+ BreakPointInfo::SetBreakPoint(breakpoint_info, break_point);
// Now insert new position at insert_pos.
new_breakpoint_infos->set(insert_pos, *breakpoint_info);
@@ -1005,6 +1135,7 @@ void WasmSharedModuleData::PrepareForLazyCompilation(
Handle<WasmSharedModuleData> shared) {
if (shared->has_lazy_compilation_orchestrator()) return;
Isolate* isolate = shared->GetIsolate();
+ // TODO(titzer): remove dependency on module-compiler.h
auto orch_handle =
Managed<wasm::LazyCompilationOrchestrator>::Allocate(isolate);
shared->set_lazy_compilation_orchestrator(*orch_handle);
@@ -1235,9 +1366,8 @@ MaybeHandle<FixedArray> WasmSharedModuleData::CheckBreakPoints(
Handle<BreakPointInfo>::cast(maybe_breakpoint_info);
if (breakpoint_info->source_position() != position) return {};
- Handle<Object> breakpoint_objects(breakpoint_info->break_point_objects(),
- isolate);
- return isolate->debug()->GetHitBreakPointObjects(breakpoint_objects);
+ Handle<Object> break_points(breakpoint_info->break_points(), isolate);
+ return isolate->debug()->GetHitBreakPoints(break_points);
}
Handle<WasmCompiledModule> WasmCompiledModule::New(
@@ -1303,13 +1433,7 @@ Handle<WasmCompiledModule> WasmCompiledModule::New(
// has_code_table and pass undefined.
compiled_module->set_code_table(*code_table);
- native_module->function_tables() = function_tables;
- native_module->empty_function_tables() = function_tables;
-
int function_count = static_cast<int>(module->functions.size());
- Handle<FixedArray> handler_table =
- isolate->factory()->NewFixedArray(function_count, TENURED);
- compiled_module->set_handler_table(*handler_table);
Handle<FixedArray> source_positions =
isolate->factory()->NewFixedArray(function_count, TENURED);
compiled_module->set_source_positions(*source_positions);
@@ -1338,6 +1462,10 @@ Handle<WasmCompiledModule> WasmCompiledModule::Clone(
return ret;
}
+ Handle<FixedArray> export_copy = isolate->factory()->CopyFixedArray(
+ handle(module->export_wrappers(), isolate));
+ ret->set_export_wrappers(*export_copy);
+
std::unique_ptr<wasm::NativeModule> native_module =
module->GetNativeModule()->Clone();
// construct the wrapper in 2 steps, because its construction may trigger GC,
@@ -1387,65 +1515,6 @@ wasm::NativeModule* WasmCompiledModule::GetNativeModule() const {
return Managed<wasm::NativeModule>::cast(native_module())->get();
}
-void WasmCompiledModule::ResetGCModel(Isolate* isolate,
- WasmCompiledModule* compiled_module) {
- DisallowHeapAllocation no_gc;
- TRACE("Resetting %d\n", compiled_module->instance_id());
- Object* undefined = *isolate->factory()->undefined_value();
- Object* fct_obj = compiled_module->code_table();
- if (fct_obj != nullptr && fct_obj != undefined) {
- // Patch code to update memory references, global references, and function
- // table references.
- Zone specialization_zone(isolate->allocator(), ZONE_NAME);
- wasm::CodeSpecialization code_specialization(isolate, &specialization_zone);
-
- // Reset function tables.
- if (compiled_module->has_function_tables()) {
- FixedArray* function_tables = compiled_module->function_tables();
- FixedArray* empty_function_tables =
- compiled_module->empty_function_tables();
- if (function_tables != empty_function_tables) {
- DCHECK_EQ(function_tables->length(), empty_function_tables->length());
- for (int i = 0, e = function_tables->length(); i < e; ++i) {
- GlobalHandleAddress func_addr =
- WasmCompiledModule::GetTableValue(function_tables, i);
- code_specialization.RelocatePointer(
- func_addr,
- WasmCompiledModule::GetTableValue(empty_function_tables, i));
- }
- compiled_module->set_function_tables(empty_function_tables);
- }
- }
-
- // TODO(6792): No longer needed once WebAssembly code is off heap.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
- FixedArray* functions = FixedArray::cast(fct_obj);
- for (int i = compiled_module->num_imported_functions(),
- end = functions->length();
- i < end; ++i) {
- Code* code = Code::cast(functions->get(i));
- // Skip lazy compile stubs.
- if (code->builtin_index() == Builtins::kWasmCompileLazy) continue;
- if (code->kind() != Code::WASM_FUNCTION) {
- // From here on, there should only be wrappers for exported functions.
- for (; i < end; ++i) {
- DCHECK_EQ(Code::JS_TO_WASM_FUNCTION,
- Code::cast(functions->get(i))->kind());
- }
- break;
- }
- bool changed = code_specialization.ApplyToWasmCode(
- WasmCodeWrapper(handle(code)), SKIP_ICACHE_FLUSH);
- // TODO(wasm): Check if this is faster than passing FLUSH_ICACHE_IF_NEEDED
- // above.
- if (changed) {
- Assembler::FlushICache(isolate, code->instruction_start(),
- code->instruction_size());
- }
- }
- }
-}
-
void WasmCompiledModule::InitId() {
#if DEBUG
static uint32_t instance_id_counter = 0;
@@ -1491,22 +1560,6 @@ void WasmCompiledModule::Reset(Isolate* isolate,
i, isolate->heap()->undefined_value());
}
}
- // Reset function tables.
- if (native_module->function_tables().size() > 0) {
- std::vector<GlobalHandleAddress>& function_tables =
- native_module->function_tables();
- std::vector<GlobalHandleAddress>& empty_function_tables =
- native_module->empty_function_tables();
-
- if (function_tables != empty_function_tables) {
- DCHECK_EQ(function_tables.size(), empty_function_tables.size());
- for (size_t i = 0, e = function_tables.size(); i < e; ++i) {
- code_specialization.RelocatePointer(function_tables[i],
- empty_function_tables[i]);
- }
- native_module->function_tables() = empty_function_tables;
- }
- }
for (uint32_t i = native_module->num_imported_functions(),
end = native_module->FunctionCount();
@@ -1519,7 +1572,7 @@ void WasmCompiledModule::Reset(Isolate* isolate,
// TODO(wasm): Check if this is faster than passing FLUSH_ICACHE_IF_NEEDED
// above.
if (changed) {
- Assembler::FlushICache(isolate, code->instructions().start(),
+ Assembler::FlushICache(code->instructions().start(),
code->instructions().size());
}
}
@@ -1646,30 +1699,23 @@ void WasmCompiledModule::ReinitializeAfterDeserialization(
}
size_t function_table_count =
compiled_module->shared()->module()->function_tables.size();
- wasm::NativeModule* native_module = compiled_module->GetNativeModule();
if (function_table_count > 0) {
// The tables are of the right size, but contain bogus global handle
// addresses. Produce new global handles for the empty tables, then reset,
// which will relocate the code. We end up with a WasmCompiledModule as-if
// it were just compiled.
- Handle<FixedArray> function_tables;
- if (!FLAG_wasm_jit_to_native) {
+ if (!WASM_CONTEXT_TABLES) {
DCHECK(compiled_module->has_function_tables());
- function_tables =
- handle(compiled_module->empty_function_tables(), isolate);
- } else {
- DCHECK_GT(native_module->function_tables().size(), 0);
- }
- for (size_t i = 0; i < function_table_count; ++i) {
- Handle<Object> global_func_table_handle =
- isolate->global_handles()->Create(isolate->heap()->undefined_value());
- GlobalHandleAddress new_func_table = global_func_table_handle.address();
- if (!FLAG_wasm_jit_to_native) {
+ Handle<FixedArray> function_tables(
+ compiled_module->empty_function_tables(), isolate);
+ for (size_t i = 0; i < function_table_count; ++i) {
+ Handle<Object> global_func_table_handle =
+ isolate->global_handles()->Create(
+ isolate->heap()->undefined_value());
+ GlobalHandleAddress new_func_table = global_func_table_handle.address();
SetTableValue(isolate, function_tables, static_cast<int>(i),
new_func_table);
- } else {
- native_module->empty_function_tables()[i] = new_func_table;
}
}
}
@@ -1761,10 +1807,9 @@ bool WasmSharedModuleData::GetPositionInfo(uint32_t position,
return true;
}
-
bool WasmCompiledModule::SetBreakPoint(
Handle<WasmCompiledModule> compiled_module, int* position,
- Handle<Object> break_point_object) {
+ Handle<BreakPoint> break_point) {
Isolate* isolate = compiled_module->GetIsolate();
Handle<WasmSharedModuleData> shared(compiled_module->shared(), isolate);
@@ -1779,7 +1824,7 @@ bool WasmCompiledModule::SetBreakPoint(
DCHECK(IsBreakablePosition(*shared, func_index, offset_in_func));
// Insert new break point into break_positions of shared module data.
- WasmSharedModuleData::AddBreakpoint(shared, *position, break_point_object);
+ WasmSharedModuleData::AddBreakpoint(shared, *position, break_point);
// Iterate over all instances of this module and tell them to set this new
// breakpoint.
@@ -1793,6 +1838,28 @@ bool WasmCompiledModule::SetBreakPoint(
return true;
}
+void WasmCompiledModule::LogWasmCodes(Isolate* isolate) {
+ wasm::NativeModule* native_module = GetNativeModule();
+ if (native_module == nullptr) return;
+ const uint32_t number_of_codes = native_module->FunctionCount();
+ if (has_shared()) {
+ Handle<WasmSharedModuleData> shared_handle(shared(), isolate);
+ for (uint32_t i = 0; i < number_of_codes; i++) {
+ wasm::WasmCode* code = native_module->GetCode(i);
+ if (code == nullptr) continue;
+ int name_length;
+ Handle<String> name(
+ WasmSharedModuleData::GetFunctionName(isolate, shared_handle, i));
+ auto cname = name->ToCString(AllowNullsFlag::DISALLOW_NULLS,
+ RobustnessFlag::ROBUST_STRING_TRAVERSAL,
+ &name_length);
+ wasm::WasmName wasm_name(cname.get(), name_length);
+ PROFILE(isolate, CodeCreateEvent(CodeEventListener::FUNCTION_TAG, code,
+ wasm_name));
+ }
+ }
+}
+
void AttachWasmFunctionInfo(Isolate* isolate, Handle<Code> code,
MaybeHandle<WeakCell> weak_instance,
int func_index) {
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index cecc11f83f..fe2ed419db 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_OBJECTS_H_
-#define V8_WASM_OBJECTS_H_
+#ifndef V8_WASM_WASM_OBJECTS_H_
+#define V8_WASM_WASM_OBJECTS_H_
#include "src/base/bits.h"
#include "src/debug/debug.h"
@@ -38,6 +38,8 @@ class WasmCompiledModule;
class WasmDebugInfo;
class WasmInstanceObject;
+#define WASM_CONTEXT_TABLES FLAG_wasm_jit_to_native
+
#define DECL_OOL_QUERY(type) static bool Is##type(Object* object);
#define DECL_OOL_CAST(type) static type* cast(Object* object);
@@ -55,6 +57,15 @@ class WasmInstanceObject;
static const int k##name##Offset = \
kSize + (k##name##Index - kFieldCount) * kPointerSize;
+// An entry in an indirect dispatch table.
+struct IndirectFunctionTableEntry {
+ int32_t sig_id = 0;
+ WasmContext* context = nullptr;
+ Address target = nullptr;
+
+ MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(IndirectFunctionTableEntry)
+};
+
// Wasm context used to store the mem_size and mem_start address of the linear
// memory. These variables can be accessed at C++ level at graph build time
// (e.g., initialized during instance building / changed at runtime by
@@ -67,14 +78,27 @@ struct WasmContext {
uint32_t mem_size = 0; // TODO(titzer): uintptr_t?
uint32_t mem_mask = 0; // TODO(titzer): uintptr_t?
byte* globals_start = nullptr;
+ // TODO(wasm): pad these entries to a power of two.
+ IndirectFunctionTableEntry* table = nullptr;
+ uint32_t table_size = 0;
- inline void SetRawMemory(void* mem_start, size_t mem_size) {
+ void SetRawMemory(void* mem_start, size_t mem_size) {
DCHECK_LE(mem_size, wasm::kV8MaxWasmMemoryPages * wasm::kWasmPageSize);
this->mem_start = static_cast<byte*>(mem_start);
this->mem_size = static_cast<uint32_t>(mem_size);
this->mem_mask = base::bits::RoundUpToPowerOfTwo32(this->mem_size) - 1;
DCHECK_LE(mem_size, this->mem_mask + 1);
}
+
+ ~WasmContext() {
+ if (table) free(table);
+ mem_start = nullptr;
+ mem_size = 0;
+ mem_mask = 0;
+ globals_start = nullptr;
+ table = nullptr;
+ table_size = 0;
+ }
};
// Representation of a WebAssembly.Module JavaScript-level object.
@@ -137,9 +161,13 @@ class WasmTableObject : public JSObject {
static void Set(Isolate* isolate, Handle<WasmTableObject> table,
int32_t index, Handle<JSFunction> function);
- static void UpdateDispatchTables(Handle<WasmTableObject> table, int index,
- wasm::FunctionSig* sig,
- Handle<Object> code_or_foreign);
+ static void UpdateDispatchTables(Isolate* isolate,
+ Handle<WasmTableObject> table,
+ int table_index, wasm::FunctionSig* sig,
+ Handle<WasmInstanceObject> from_instance,
+ WasmCodeWrapper wasm_code, int func_index);
+
+ static void ClearDispatchTables(Handle<WasmTableObject> table, int index);
};
// Representation of a WebAssembly.Memory JavaScript-level object.
@@ -249,6 +277,9 @@ class WasmInstanceObject : public JSObject {
static void ValidateOrphanedInstanceForTesting(
Isolate* isolate, Handle<WasmInstanceObject> instance);
+
+ static void InstallFinalizer(Isolate* isolate,
+ Handle<WasmInstanceObject> instance);
};
// A WASM function that is wrapped and exported to JavaScript.
@@ -306,7 +337,7 @@ class WasmSharedModuleData : public FixedArray {
Handle<WasmSharedModuleData>);
static void AddBreakpoint(Handle<WasmSharedModuleData>, int position,
- Handle<Object> break_point_object);
+ Handle<BreakPoint> break_point);
static void SetBreakpointsOnNewInstance(Handle<WasmSharedModuleData>,
Handle<WasmInstanceObject>);
@@ -468,7 +499,6 @@ class WasmCompiledModule : public FixedArray {
MACRO(WASM_OBJECT, WasmCompiledModule, prev_instance) \
MACRO(WEAK_LINK, WasmInstanceObject, owning_instance) \
MACRO(WEAK_LINK, WasmModuleObject, wasm_module) \
- MACRO(OBJECT, FixedArray, handler_table) \
MACRO(OBJECT, FixedArray, source_positions) \
MACRO(OBJECT, Foreign, native_module) \
MACRO(OBJECT, FixedArray, lazy_compile_data) \
@@ -478,9 +508,7 @@ class WasmCompiledModule : public FixedArray {
MACRO(SMALL_CONST_NUMBER, uint32_t, num_imported_functions) \
MACRO(CONST_OBJECT, FixedArray, code_table) \
MACRO(OBJECT, FixedArray, function_tables) \
- MACRO(OBJECT, FixedArray, signature_tables) \
- MACRO(CONST_OBJECT, FixedArray, empty_function_tables) \
- MACRO(CONST_OBJECT, FixedArray, empty_signature_tables)
+ MACRO(CONST_OBJECT, FixedArray, empty_function_tables)
// TODO(mtrofin): this is unnecessary when we stop needing
// FLAG_wasm_jit_to_native, because we have instance_id on NativeModule.
@@ -516,9 +544,6 @@ class WasmCompiledModule : public FixedArray {
Handle<WasmCompiledModule> module);
static void Reset(Isolate* isolate, WasmCompiledModule* module);
- // TODO(mtrofin): delete this when we don't need FLAG_wasm_jit_to_native
- static void ResetGCModel(Isolate* isolate, WasmCompiledModule* module);
-
wasm::NativeModule* GetNativeModule() const;
void InsertInChain(WasmModuleObject*);
void RemoveFromChain();
@@ -543,7 +568,7 @@ class WasmCompiledModule : public FixedArray {
// If it points outside a function, or behind the last breakable location,
// this function returns false and does not set any breakpoint.
static bool SetBreakPoint(Handle<WasmCompiledModule>, int* position,
- Handle<Object> break_point_object);
+ Handle<BreakPoint> break_point);
inline void ReplaceCodeTableForTesting(
std::vector<wasm::WasmCode*>&& testing_table);
@@ -556,6 +581,8 @@ class WasmCompiledModule : public FixedArray {
static Address GetTableValue(FixedArray* table, int index);
inline void ReplaceCodeTableForTesting(Handle<FixedArray> testing_table);
+ void LogWasmCodes(Isolate* isolate);
+
private:
void InitId();
@@ -692,4 +719,4 @@ WasmFunctionInfo GetWasmFunctionInfo(Isolate*, Handle<Code>);
} // namespace internal
} // namespace v8
-#endif // V8_WASM_OBJECTS_H_
+#endif // V8_WASM_WASM_OBJECTS_H_
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index b503aa1a5e..ac02b549a0 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -101,9 +101,11 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_I32_OP(ConvertI64, "wrap/i64")
CASE_CONVERT_OP(Convert, INT, F32, "f32", "trunc")
CASE_CONVERT_OP(Convert, INT, F64, "f64", "trunc")
- // TODO(kschimpf): Add I64 versions of saturating conversions.
+ // TODO(kschimpf): Simplify after filling in other saturating operations.
CASE_CONVERT_SAT_OP(Convert, I32, F32, "f32", "trunc")
CASE_CONVERT_SAT_OP(Convert, I32, F64, "f64", "trunc")
+ CASE_CONVERT_SAT_OP(Convert, I64, F32, "f32", "trunc")
+ CASE_CONVERT_SAT_OP(Convert, I64, F64, "f64", "trunc")
CASE_CONVERT_OP(Convert, I64, I32, "i32", "extend")
CASE_CONVERT_OP(Convert, F32, I32, "i32", "convert")
@@ -116,6 +118,9 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_I64_OP(ReinterpretF64, "reinterpret/f64")
CASE_F32_OP(ReinterpretI32, "reinterpret/i32")
CASE_F64_OP(ReinterpretI64, "reinterpret/i64")
+ CASE_INT_OP(SExtendI8, "sign_extend8")
+ CASE_INT_OP(SExtendI16, "sign_extend16")
+ CASE_I64_OP(SExtendI32, "sign_extend32")
CASE_OP(Unreachable, "unreachable")
CASE_OP(Nop, "nop")
CASE_OP(Block, "block")
@@ -320,6 +325,19 @@ bool WasmOpcodes::IsUnconditionalJump(WasmOpcode opcode) {
}
}
+bool WasmOpcodes::IsSignExtensionOpcode(WasmOpcode opcode) {
+ switch (opcode) {
+ case kExprI32SExtendI8:
+ case kExprI32SExtendI16:
+ case kExprI64SExtendI8:
+ case kExprI64SExtendI16:
+ case kExprI64SExtendI32:
+ return true;
+ default:
+ return false;
+ }
+}
+
std::ostream& operator<<(std::ostream& os, const FunctionSig& sig) {
if (sig.return_count() == 0) os << "v";
for (auto ret : sig.returns()) {
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 9f8232c902..c6b87f0556 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_OPCODES_H_
-#define V8_WASM_OPCODES_H_
+#ifndef V8_WASM_WASM_OPCODES_H_
+#define V8_WASM_WASM_OPCODES_H_
#include "src/globals.h"
#include "src/machine-type.h"
@@ -225,21 +225,26 @@ using WasmName = Vector<const char>;
V(I32ReinterpretF32, 0xbc, i_f) \
V(I64ReinterpretF64, 0xbd, l_d) \
V(F32ReinterpretI32, 0xbe, f_i) \
- V(F64ReinterpretI64, 0xbf, d_l)
+ V(F64ReinterpretI64, 0xbf, d_l) \
+ V(I32SExtendI8, 0xc0, i_i) \
+ V(I32SExtendI16, 0xc1, i_i) \
+ V(I64SExtendI8, 0xc2, l_l) \
+ V(I64SExtendI16, 0xc3, l_l) \
+ V(I64SExtendI32, 0xc4, l_l)
// For compatibility with Asm.js.
#define FOREACH_ASMJS_COMPAT_OPCODE(V) \
- V(F64Acos, 0xc2, d_d) \
- V(F64Asin, 0xc3, d_d) \
- V(F64Atan, 0xc4, d_d) \
- V(F64Cos, 0xc5, d_d) \
- V(F64Sin, 0xc6, d_d) \
- V(F64Tan, 0xc7, d_d) \
- V(F64Exp, 0xc8, d_d) \
- V(F64Log, 0xc9, d_d) \
- V(F64Atan2, 0xca, d_dd) \
- V(F64Pow, 0xcb, d_dd) \
- V(F64Mod, 0xcc, d_dd) \
+ V(F64Acos, 0xc5, d_d) \
+ V(F64Asin, 0xc6, d_d) \
+ V(F64Atan, 0xc7, d_d) \
+ V(F64Cos, 0xc8, d_d) \
+ V(F64Sin, 0xc9, d_d) \
+ V(F64Tan, 0xca, d_d) \
+ V(F64Exp, 0xcb, d_d) \
+ V(F64Log, 0xcc, d_d) \
+ V(F64Atan2, 0xcd, d_dd) \
+ V(F64Pow, 0xce, d_dd) \
+ V(F64Mod, 0xcf, d_dd) \
V(I32AsmjsDivS, 0xd0, i_ii) \
V(I32AsmjsDivU, 0xd1, i_ii) \
V(I32AsmjsRemS, 0xd2, i_ii) \
@@ -403,8 +408,11 @@ using WasmName = Vector<const char>;
V(I32SConvertSatF32, 0xfc00, i_f) \
V(I32UConvertSatF32, 0xfc01, i_f) \
V(I32SConvertSatF64, 0xfc02, i_d) \
- V(I32UConvertSatF64, 0xfc03, i_d)
-// TODO(kschimpf): Add remaining i64 numeric opcodes.
+ V(I32UConvertSatF64, 0xfc03, i_d) \
+ V(I64SConvertSatF32, 0xfc04, l_f) \
+ V(I64UConvertSatF32, 0xfc05, l_f) \
+ V(I64SConvertSatF64, 0xfc06, l_d) \
+ V(I64UConvertSatF64, 0xfc07, l_d)
#define FOREACH_ATOMIC_OPCODE(V) \
V(I32AtomicLoad, 0xfe10, i_i) \
@@ -647,6 +655,7 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
static FunctionSig* AsmjsSignature(WasmOpcode opcode);
static bool IsPrefixOpcode(WasmOpcode opcode);
static bool IsControlOpcode(WasmOpcode opcode);
+ static bool IsSignExtensionOpcode(WasmOpcode opcode);
// Check whether the given opcode always jumps, i.e. all instructions after
// this one in the current block are dead. Returns false for |end|.
static bool IsUnconditionalJump(WasmOpcode opcode);
@@ -793,4 +802,4 @@ struct WasmInitExpr {
} // namespace internal
} // namespace v8
-#endif // V8_WASM_OPCODES_H_
+#endif // V8_WASM_WASM_OPCODES_H_
diff --git a/deps/v8/src/wasm/wasm-result.h b/deps/v8/src/wasm/wasm-result.h
index 7744b42923..8250db9040 100644
--- a/deps/v8/src/wasm/wasm-result.h
+++ b/deps/v8/src/wasm/wasm-result.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_RESULT_H_
-#define V8_WASM_RESULT_H_
+#ifndef V8_WASM_WASM_RESULT_H_
+#define V8_WASM_WASM_RESULT_H_
#include <cstdarg>
#include <memory>
@@ -158,4 +158,4 @@ class V8_EXPORT_PRIVATE ErrorThrower {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_WASM_WASM_RESULT_H_
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index 4466672f37..240ffbca3d 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -133,7 +133,6 @@ class V8_EXPORT_PRIVATE NativeModuleSerializer {
static size_t GetCodeHeaderSize();
size_t MeasureCode(const WasmCode*) const;
size_t MeasureCopiedStubs() const;
- FixedArray* GetHandlerTable(const WasmCode*) const;
ByteArray* GetSourcePositions(const WasmCode*) const;
void BufferHeader();
@@ -187,7 +186,6 @@ NativeModuleSerializer::NativeModuleSerializer(Isolate* isolate,
: isolate_(isolate), native_module_(module) {
DCHECK_NOT_NULL(isolate_);
DCHECK_NOT_NULL(native_module_);
- DCHECK_NULL(native_module_->lazy_builtin_);
// TODO(mtrofin): persist the export wrappers. Ideally, we'd only persist
// the unique ones, i.e. the cache.
ExternalReferenceTable* table = ExternalReferenceTable::instance(isolate_);
@@ -210,12 +208,7 @@ NativeModuleSerializer::NativeModuleSerializer(Isolate* isolate,
size_t NativeModuleSerializer::MeasureHeader() const {
return sizeof(uint32_t) + // total wasm fct count
- sizeof(
- uint32_t) + // imported fcts - i.e. index of first wasm function
- sizeof(uint32_t) + // table count
- native_module_->specialization_data_.function_tables.size()
- // function table, containing pointers
- * sizeof(GlobalHandleAddress);
+ sizeof(uint32_t); // imported fcts - i.e. index of first wasm function
}
void NativeModuleSerializer::BufferHeader() {
@@ -225,37 +218,25 @@ void NativeModuleSerializer::BufferHeader() {
Writer writer(remaining_);
writer.Write(native_module_->FunctionCount());
writer.Write(native_module_->num_imported_functions());
- writer.Write(static_cast<uint32_t>(
- native_module_->specialization_data_.function_tables.size()));
- for (size_t i = 0,
- e = native_module_->specialization_data_.function_tables.size();
- i < e; ++i) {
- writer.Write(native_module_->specialization_data_.function_tables[i]);
- }
}
size_t NativeModuleSerializer::GetCodeHeaderSize() {
return sizeof(size_t) + // size of this section
sizeof(size_t) + // offset of constant pool
sizeof(size_t) + // offset of safepoint table
+ sizeof(size_t) + // offset of handler table
sizeof(uint32_t) + // stack slots
sizeof(size_t) + // code size
sizeof(size_t) + // reloc size
- sizeof(uint32_t) + // handler size
sizeof(uint32_t) + // source positions size
sizeof(size_t) + // protected instructions size
sizeof(bool); // is_liftoff
}
size_t NativeModuleSerializer::MeasureCode(const WasmCode* code) const {
- FixedArray* handler_table = GetHandlerTable(code);
ByteArray* source_positions = GetSourcePositions(code);
return GetCodeHeaderSize() + code->instructions().size() + // code
code->reloc_info().size() + // reloc info
- (handler_table == nullptr
- ? 0
- : static_cast<uint32_t>(
- handler_table->length())) + // handler table
(source_positions == nullptr
? 0
: static_cast<uint32_t>(
@@ -325,21 +306,6 @@ void NativeModuleSerializer::BufferCopiedStubs() {
}
}
-FixedArray* NativeModuleSerializer::GetHandlerTable(
- const WasmCode* code) const {
- if (code->kind() != WasmCode::kFunction) return nullptr;
- uint32_t index = code->index();
- // We write the address, the size, and then copy the code as-is, followed
- // by reloc info, followed by handler table and source positions.
- Object* handler_table_entry =
- native_module_->compiled_module()->handler_table()->get(
- static_cast<int>(index));
- if (handler_table_entry->IsFixedArray()) {
- return FixedArray::cast(handler_table_entry);
- }
- return nullptr;
-}
-
ByteArray* NativeModuleSerializer::GetSourcePositions(
const WasmCode* code) const {
if (code->kind() != WasmCode::kFunction) return nullptr;
@@ -364,15 +330,7 @@ void NativeModuleSerializer::BufferCurrentWasmCode() {
void NativeModuleSerializer::BufferCodeInAllocatedScratch(
const WasmCode* code) {
// We write the address, the size, and then copy the code as-is, followed
- // by reloc info, followed by handler table and source positions.
- FixedArray* handler_table_entry = GetHandlerTable(code);
- uint32_t handler_table_size = 0;
- Address handler_table = nullptr;
- if (handler_table_entry != nullptr) {
- handler_table_size = static_cast<uint32_t>(handler_table_entry->length());
- handler_table = reinterpret_cast<Address>(
- handler_table_entry->GetFirstElementAddress());
- }
+ // by reloc info, followed by source positions.
ByteArray* source_positions_entry = GetSourcePositions(code);
Address source_positions = nullptr;
uint32_t source_positions_size = 0;
@@ -386,10 +344,10 @@ void NativeModuleSerializer::BufferCodeInAllocatedScratch(
writer.Write(MeasureCode(code));
writer.Write(code->constant_pool_offset());
writer.Write(code->safepoint_table_offset());
+ writer.Write(code->handler_table_offset());
writer.Write(code->stack_slots());
writer.Write(code->instructions().size());
writer.Write(code->reloc_info().size());
- writer.Write(handler_table_size);
writer.Write(source_positions_size);
writer.Write(code->protected_instructions().size());
writer.Write(code->is_liftoff());
@@ -398,7 +356,6 @@ void NativeModuleSerializer::BufferCodeInAllocatedScratch(
// write the code and everything else
writer.WriteVector(code->instructions());
writer.WriteVector(code->reloc_info());
- writer.WriteVector({handler_table, handler_table_size});
writer.WriteVector({source_positions, source_positions_size});
writer.WriteVector(
{reinterpret_cast<const byte*>(code->protected_instructions().data()),
@@ -555,16 +512,6 @@ bool NativeModuleDeserializer::ReadHeader() {
bool ok = functions == native_module_->FunctionCount() &&
imports == native_module_->num_imported_functions();
if (!ok) return false;
- size_t table_count = reader.Read<uint32_t>();
-
- std::vector<GlobalHandleAddress> funcs(table_count);
- for (size_t i = 0; i < table_count; ++i) {
- funcs[i] = reader.Read<GlobalHandleAddress>();
- }
- native_module_->function_tables() = funcs;
- // resize, so that from here on the native module can be
- // asked about num_function_tables().
- native_module_->empty_function_tables().resize(table_count);
unread_ = unread_ + (start_size - reader.current_buffer().size());
return true;
@@ -592,10 +539,10 @@ bool NativeModuleDeserializer::ReadCode() {
USE(code_section_size);
size_t constant_pool_offset = reader.Read<size_t>();
size_t safepoint_table_offset = reader.Read<size_t>();
+ size_t handler_table_offset = reader.Read<size_t>();
uint32_t stack_slot_count = reader.Read<uint32_t>();
size_t code_size = reader.Read<size_t>();
size_t reloc_size = reader.Read<size_t>();
- uint32_t handler_size = reader.Read<uint32_t>();
uint32_t source_position_size = reader.Read<uint32_t>();
size_t protected_instructions_size = reader.Read<size_t>();
bool is_liftoff = reader.Read<bool>();
@@ -612,9 +559,10 @@ bool NativeModuleDeserializer::ReadCode() {
WasmCode* ret = native_module_->AddOwnedCode(
code_buffer, std::move(reloc_info), reloc_size, Just(index_),
WasmCode::kFunction, constant_pool_offset, stack_slot_count,
- safepoint_table_offset, protected_instructions, is_liftoff);
+ safepoint_table_offset, handler_table_offset, protected_instructions,
+ is_liftoff);
if (ret == nullptr) return false;
- native_module_->SetCodeTable(index_, ret);
+ native_module_->code_table_[index_] = ret;
// now relocate the code
int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
@@ -634,7 +582,7 @@ bool NativeModuleDeserializer::ReadCode() {
case RelocInfo::CODE_TARGET: {
uint32_t tag = GetWasmCalleeTag(iter.rinfo());
Address target = GetTrampolineOrStubFromTag(tag);
- iter.rinfo()->set_target_address(nullptr, target, SKIP_WRITE_BARRIER,
+ iter.rinfo()->set_target_address(target, SKIP_WRITE_BARRIER,
SKIP_ICACHE_FLUSH);
break;
}
@@ -643,23 +591,14 @@ bool NativeModuleDeserializer::ReadCode() {
reinterpret_cast<intptr_t>(iter.rinfo()->target_address()));
Address address =
ExternalReferenceTable::instance(isolate_)->address(orig_target);
- iter.rinfo()->set_target_runtime_entry(
- nullptr, address, SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ iter.rinfo()->set_target_runtime_entry(address, SKIP_WRITE_BARRIER,
+ SKIP_ICACHE_FLUSH);
break;
}
default:
break;
}
}
- if (handler_size > 0) {
- Handle<FixedArray> handler_table = isolate_->factory()->NewFixedArray(
- static_cast<int>(handler_size), TENURED);
- reader.ReadIntoVector(
- {reinterpret_cast<Address>(handler_table->GetFirstElementAddress()),
- handler_size});
- native_module_->compiled_module()->handler_table()->set(
- static_cast<int>(index_), *handler_table);
- }
if (source_position_size > 0) {
Handle<ByteArray> source_positions = isolate_->factory()->NewByteArray(
static_cast<int>(source_position_size), TENURED);
@@ -743,6 +682,10 @@ MaybeHandle<WasmCompiledModule> DeserializeNativeModule(
compiled_module->GetNativeModule());
if (!deserializer.Read(data)) return {};
+ // TODO(6792): Wrappers below might be cloned using {Factory::CopyCode}. This
+ // requires unlocking the code space here. This should be moved into the
+ // allocator eventually.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
CompileJsToWasmWrappers(isolate, compiled_module, isolate->counters());
WasmCompiledModule::ReinitializeAfterDeserialization(isolate,
compiled_module);
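With this change the handler table no longer travels as a separately copied FixedArray; its offset is written into the fixed per-code header, in the same position for writer and reader. Below is a minimal sketch of the field order implied by GetCodeHeaderSize(), BufferCodeInAllocatedScratch() and ReadCode() above; the struct and helper are simplified stand-ins, not the real serializer types.

#include <cstddef>
#include <cstdint>
#include <vector>

// Simplified stand-in for the per-code header. The real serializer writes
// these fields one at a time, so reader and writer must agree on the order.
struct CodeHeaderSketch {
  size_t section_size;            // total size of this code entry
  size_t constant_pool_offset;
  size_t safepoint_table_offset;
  size_t handler_table_offset;    // offset into the code, replacing a copied table
  uint32_t stack_slots;
  size_t code_size;
  size_t reloc_size;
  uint32_t source_positions_size;
  size_t protected_instructions_size;
  bool is_liftoff;
};

// Field-by-field serialization; memcpy of the whole struct would also copy
// padding and would not match a byte-packed stream.
void WriteCodeHeaderSketch(std::vector<uint8_t>* out, const CodeHeaderSketch& h) {
  auto put = [out](const void* p, size_t n) {
    const uint8_t* b = static_cast<const uint8_t*>(p);
    out->insert(out->end(), b, b + n);
  };
  put(&h.section_size, sizeof h.section_size);
  put(&h.constant_pool_offset, sizeof h.constant_pool_offset);
  put(&h.safepoint_table_offset, sizeof h.safepoint_table_offset);
  put(&h.handler_table_offset, sizeof h.handler_table_offset);
  put(&h.stack_slots, sizeof h.stack_slots);
  put(&h.code_size, sizeof h.code_size);
  put(&h.reloc_size, sizeof h.reloc_size);
  put(&h.source_positions_size, sizeof h.source_positions_size);
  put(&h.protected_instructions_size, sizeof h.protected_instructions_size);
  put(&h.is_liftoff, sizeof h.is_liftoff);
}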
diff --git a/deps/v8/src/wasm/wasm-serialization.h b/deps/v8/src/wasm/wasm-serialization.h
index 9c0e9ce10a..5bb49bfdce 100644
--- a/deps/v8/src/wasm/wasm-serialization.h
+++ b/deps/v8/src/wasm/wasm-serialization.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_SERIALIZATION_H_
-#define V8_WASM_SERIALIZATION_H_
+#ifndef V8_WASM_WASM_SERIALIZATION_H_
+#define V8_WASM_WASM_SERIALIZATION_H_
#include "src/wasm/wasm-objects.h"
@@ -21,4 +21,4 @@ MaybeHandle<WasmCompiledModule> DeserializeNativeModule(
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_WASM_WASM_SERIALIZATION_H_
diff --git a/deps/v8/src/wasm/wasm-text.cc b/deps/v8/src/wasm/wasm-text.cc
index 81c8e41813..1619241332 100644
--- a/deps/v8/src/wasm/wasm-text.cc
+++ b/deps/v8/src/wasm/wasm-text.cc
@@ -134,7 +134,7 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
case kExprCallIndirect: {
CallIndirectOperand<Decoder::kNoValidate> operand(&i, i.pc());
DCHECK_EQ(0, operand.table_index);
- os << "call_indirect " << operand.index;
+ os << "call_indirect " << operand.sig_index;
break;
}
case kExprCallFunction: {
@@ -208,6 +208,7 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
UNREACHABLE();
break;
}
+ break;
}
// This group is just printed by their internal opcode name, as they
diff --git a/deps/v8/src/wasm/wasm-text.h b/deps/v8/src/wasm/wasm-text.h
index 1608ea9a2d..60957966ab 100644
--- a/deps/v8/src/wasm/wasm-text.h
+++ b/deps/v8/src/wasm/wasm-text.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_S_EXPR_H_
-#define V8_WASM_S_EXPR_H_
+#ifndef V8_WASM_WASM_TEXT_H_
+#define V8_WASM_WASM_TEXT_H_
#include <cstdint>
#include <ostream>
@@ -35,4 +35,4 @@ void PrintWasmText(
} // namespace internal
} // namespace v8
-#endif // V8_WASM_S_EXPR_H_
+#endif // V8_WASM_WASM_TEXT_H_
diff --git a/deps/v8/src/wasm/wasm-value.h b/deps/v8/src/wasm/wasm-value.h
index a30657aee0..22fd13c219 100644
--- a/deps/v8/src/wasm/wasm-value.h
+++ b/deps/v8/src/wasm/wasm-value.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_VALUE_H_
-#define V8_WASM_VALUE_H_
+#ifndef V8_WASM_WASM_VALUE_H_
+#define V8_WASM_WASM_VALUE_H_
#include "src/boxed-float.h"
#include "src/wasm/wasm-opcodes.h"
@@ -84,4 +84,4 @@ FOREACH_WASMVAL_TYPE(DECLARE_CAST)
} // namespace internal
} // namespace v8
-#endif // V8_WASM_VALUE_H_
+#endif // V8_WASM_WASM_VALUE_H_
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index a75a8ddd74..eef4158f53 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -15,7 +15,7 @@
namespace v8 {
namespace internal {
-bool CpuFeatures::SupportsCrankshaft() { return true; }
+bool CpuFeatures::SupportsOptimizer() { return true; }
bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(SSE4_1); }
@@ -23,9 +23,6 @@ bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(SSE4_1); }
// Implementation of Assembler
-static const byte kCallOpcode = 0xE8;
-
-
void Assembler::emitl(uint32_t x) {
Memory::uint32_at(pc_) = x;
pc_ += sizeof(uint32_t);
@@ -95,14 +92,12 @@ void Assembler::emit_rex_64(Register reg, XMMRegister rm_reg) {
emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
}
-
-void Assembler::emit_rex_64(Register reg, const Operand& op) {
- emit(0x48 | reg.high_bit() << 2 | op.rex_);
+void Assembler::emit_rex_64(Register reg, Operand op) {
+ emit(0x48 | reg.high_bit() << 2 | op.data().rex);
}
-
-void Assembler::emit_rex_64(XMMRegister reg, const Operand& op) {
- emit(0x48 | (reg.code() & 0x8) >> 1 | op.rex_);
+void Assembler::emit_rex_64(XMMRegister reg, Operand op) {
+ emit(0x48 | (reg.code() & 0x8) >> 1 | op.data().rex);
}
@@ -111,19 +106,14 @@ void Assembler::emit_rex_64(Register rm_reg) {
emit(0x48 | rm_reg.high_bit());
}
-
-void Assembler::emit_rex_64(const Operand& op) {
- emit(0x48 | op.rex_);
-}
-
+void Assembler::emit_rex_64(Operand op) { emit(0x48 | op.data().rex); }
void Assembler::emit_rex_32(Register reg, Register rm_reg) {
emit(0x40 | reg.high_bit() << 2 | rm_reg.high_bit());
}
-
-void Assembler::emit_rex_32(Register reg, const Operand& op) {
- emit(0x40 | reg.high_bit() << 2 | op.rex_);
+void Assembler::emit_rex_32(Register reg, Operand op) {
+ emit(0x40 | reg.high_bit() << 2 | op.data().rex);
}
@@ -131,26 +121,20 @@ void Assembler::emit_rex_32(Register rm_reg) {
emit(0x40 | rm_reg.high_bit());
}
-
-void Assembler::emit_rex_32(const Operand& op) {
- emit(0x40 | op.rex_);
-}
-
+void Assembler::emit_rex_32(Operand op) { emit(0x40 | op.data().rex); }
void Assembler::emit_optional_rex_32(Register reg, Register rm_reg) {
byte rex_bits = reg.high_bit() << 2 | rm_reg.high_bit();
if (rex_bits != 0) emit(0x40 | rex_bits);
}
-
-void Assembler::emit_optional_rex_32(Register reg, const Operand& op) {
- byte rex_bits = reg.high_bit() << 2 | op.rex_;
+void Assembler::emit_optional_rex_32(Register reg, Operand op) {
+ byte rex_bits = reg.high_bit() << 2 | op.data().rex;
if (rex_bits != 0) emit(0x40 | rex_bits);
}
-
-void Assembler::emit_optional_rex_32(XMMRegister reg, const Operand& op) {
- byte rex_bits = (reg.code() & 0x8) >> 1 | op.rex_;
+void Assembler::emit_optional_rex_32(XMMRegister reg, Operand op) {
+ byte rex_bits = (reg.code() & 0x8) >> 1 | op.data().rex;
if (rex_bits != 0) emit(0x40 | rex_bits);
}
@@ -181,8 +165,8 @@ void Assembler::emit_optional_rex_32(XMMRegister rm_reg) {
if (rm_reg.high_bit()) emit(0x41);
}
-void Assembler::emit_optional_rex_32(const Operand& op) {
- if (op.rex_ != 0) emit(0x40 | op.rex_);
+void Assembler::emit_optional_rex_32(Operand op) {
+ if (op.data().rex != 0) emit(0x40 | op.data().rex);
}
@@ -195,9 +179,8 @@ void Assembler::emit_vex3_byte1(XMMRegister reg, XMMRegister rm,
// byte 1 of 3-byte VEX
-void Assembler::emit_vex3_byte1(XMMRegister reg, const Operand& rm,
- LeadingOpcode m) {
- byte rxb = ~((reg.high_bit() << 2) | rm.rex_) << 5;
+void Assembler::emit_vex3_byte1(XMMRegister reg, Operand rm, LeadingOpcode m) {
+ byte rxb = ~((reg.high_bit() << 2) | rm.data().rex) << 5;
emit(rxb | m);
}
@@ -240,11 +223,10 @@ void Assembler::emit_vex_prefix(Register reg, Register vreg, Register rm,
emit_vex_prefix(ireg, ivreg, irm, l, pp, mm, w);
}
-
-void Assembler::emit_vex_prefix(XMMRegister reg, XMMRegister vreg,
- const Operand& rm, VectorLength l,
- SIMDPrefix pp, LeadingOpcode mm, VexW w) {
- if (rm.rex_ || mm != k0F || w != kW0) {
+void Assembler::emit_vex_prefix(XMMRegister reg, XMMRegister vreg, Operand rm,
+ VectorLength l, SIMDPrefix pp, LeadingOpcode mm,
+ VexW w) {
+ if (rm.data().rex || mm != k0F || w != kW0) {
emit_vex3_byte0();
emit_vex3_byte1(reg, rm, mm);
emit_vex3_byte2(w, vreg, l, pp);
@@ -254,8 +236,7 @@ void Assembler::emit_vex_prefix(XMMRegister reg, XMMRegister vreg,
}
}
-
-void Assembler::emit_vex_prefix(Register reg, Register vreg, const Operand& rm,
+void Assembler::emit_vex_prefix(Register reg, Register vreg, Operand rm,
VectorLength l, SIMDPrefix pp, LeadingOpcode mm,
VexW w) {
XMMRegister ireg = XMMRegister::from_code(reg.code());
@@ -268,19 +249,17 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
return Memory::int32_at(pc) + pc + 4;
}
-
-void Assembler::set_target_address_at(Isolate* isolate, Address pc,
- Address constant_pool, Address target,
+void Assembler::set_target_address_at(Address pc, Address constant_pool,
+ Address target,
ICacheFlushMode icache_flush_mode) {
- DCHECK_IMPLIES(isolate == nullptr, icache_flush_mode == SKIP_ICACHE_FLUSH);
Memory::int32_at(pc) = static_cast<int32_t>(target - pc - 4);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, pc, sizeof(int32_t));
+ Assembler::FlushICache(pc, sizeof(int32_t));
}
}
void Assembler::deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
+ Address pc, Address target, RelocInfo::Mode mode) {
Memory::Address_at(pc) = target;
}
@@ -290,8 +269,8 @@ Address Assembler::target_address_from_return_address(Address pc) {
}
void Assembler::deserialization_set_special_target_at(
- Isolate* isolate, Address instruction_payload, Code* code, Address target) {
- set_target_address_at(isolate, instruction_payload,
+ Address instruction_payload, Code* code, Address target) {
+ set_target_address_at(instruction_payload,
code ? code->constant_pool() : nullptr, target);
}
@@ -380,7 +359,7 @@ void RelocInfo::set_target_object(HeapObject* target,
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Memory::Object_at(pc_) = target;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(target->GetIsolate(), pc_, sizeof(Address));
+ Assembler::FlushICache(pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
@@ -395,22 +374,22 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
return origin->runtime_entry_at(pc_);
}
-void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
+void RelocInfo::set_target_runtime_entry(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target) {
- set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
}
}
-void RelocInfo::WipeOut(Isolate* isolate) {
+void RelocInfo::WipeOut() {
if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = nullptr;
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
- Assembler::set_target_address_at(isolate, pc_, constant_pool_,
+ Assembler::set_target_address_at(pc_, constant_pool_,
pc_ + sizeof(int32_t));
} else {
UNREACHABLE();
@@ -418,11 +397,11 @@ void RelocInfo::WipeOut(Isolate* isolate) {
}
template <typename ObjectVisitor>
-void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);
- Assembler::FlushICache(isolate, pc_, sizeof(Address));
+ Assembler::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
@@ -434,49 +413,6 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
}
}
-// -----------------------------------------------------------------------------
-// Implementation of Operand
-
-void Operand::set_modrm(int mod, Register rm_reg) {
- DCHECK(is_uint2(mod));
- buf_[0] = mod << 6 | rm_reg.low_bits();
- // Set REX.B to the high bit of rm.code().
- rex_ |= rm_reg.high_bit();
-}
-
-
-void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
- DCHECK_EQ(len_, 1);
- DCHECK(is_uint2(scale));
- // Use SIB with no index register only for base rsp or r12. Otherwise we
- // would skip the SIB byte entirely.
- DCHECK(index != rsp || base == rsp || base == r12);
- buf_[1] = (scale << 6) | (index.low_bits() << 3) | base.low_bits();
- rex_ |= index.high_bit() << 1 | base.high_bit();
- len_ = 2;
-}
-
-void Operand::set_disp8(int disp) {
- DCHECK(is_int8(disp));
- DCHECK(len_ == 1 || len_ == 2);
- int8_t* p = reinterpret_cast<int8_t*>(&buf_[len_]);
- *p = disp;
- len_ += sizeof(int8_t);
-}
-
-void Operand::set_disp32(int disp) {
- DCHECK(len_ == 1 || len_ == 2);
- int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
- *p = disp;
- len_ += sizeof(int32_t);
-}
-
-void Operand::set_disp64(int64_t disp) {
- DCHECK_EQ(1, len_);
- int64_t* p = reinterpret_cast<int64_t*>(&buf_[len_]);
- *p = disp;
- len_ += sizeof(disp);
-}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 38cbfc78d9..0ec50147fd 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -55,7 +55,7 @@ bool OSHasAVXSupport() {
size_t buffer_size = arraysize(buffer);
int ctl_name[] = {CTL_KERN, KERN_OSRELEASE};
if (sysctl(ctl_name, 2, buffer, &buffer_size, nullptr, 0) != 0) {
- V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
+ FATAL("V8 failed to get kernel version");
}
// The buffer now contains a string of the form XX.YY.ZZ, where
// XX is the major kernel version component.
@@ -127,26 +127,26 @@ Address RelocInfo::embedded_address() const { return Memory::Address_at(pc_); }
uint32_t RelocInfo::embedded_size() const { return Memory::uint32_at(pc_); }
-void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
+void RelocInfo::set_embedded_address(Address address,
ICacheFlushMode icache_flush_mode) {
Memory::Address_at(pc_) = address;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, pc_, sizeof(Address));
+ Assembler::FlushICache(pc_, sizeof(Address));
}
}
-void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
+void RelocInfo::set_embedded_size(uint32_t size,
ICacheFlushMode icache_flush_mode) {
Memory::uint32_at(pc_) = size;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, pc_, sizeof(uint32_t));
+ Assembler::FlushICache(pc_, sizeof(uint32_t));
}
}
-void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
+void RelocInfo::set_js_to_wasm_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- set_embedded_address(isolate, address, icache_flush_mode);
+ set_embedded_address(address, icache_flush_mode);
}
Address RelocInfo::js_to_wasm_address() const {
@@ -157,133 +157,189 @@ Address RelocInfo::js_to_wasm_address() const {
// -----------------------------------------------------------------------------
// Implementation of Operand
-Operand::Operand(Register base, int32_t disp) : rex_(0) {
- len_ = 1;
- if (base == rsp || base == r12) {
- // SIB byte is needed to encode (rsp + offset) or (r12 + offset).
- set_sib(times_1, rsp, base);
- }
+namespace {
+class OperandBuilder {
+ public:
+ OperandBuilder(Register base, int32_t disp) {
+ if (base == rsp || base == r12) {
+ // SIB byte is needed to encode (rsp + offset) or (r12 + offset).
+ set_sib(times_1, rsp, base);
+ }
- if (disp == 0 && base != rbp && base != r13) {
- set_modrm(0, base);
- } else if (is_int8(disp)) {
- set_modrm(1, base);
- set_disp8(disp);
- } else {
- set_modrm(2, base);
- set_disp32(disp);
+ if (disp == 0 && base != rbp && base != r13) {
+ set_modrm(0, base);
+ } else if (is_int8(disp)) {
+ set_modrm(1, base);
+ set_disp8(disp);
+ } else {
+ set_modrm(2, base);
+ set_disp32(disp);
+ }
}
-}
+ OperandBuilder(Register base, Register index, ScaleFactor scale,
+ int32_t disp) {
+ DCHECK(index != rsp);
+ set_sib(scale, index, base);
+ if (disp == 0 && base != rbp && base != r13) {
+ // This call to set_modrm doesn't overwrite the REX.B (or REX.X) bits
+ // possibly set by set_sib.
+ set_modrm(0, rsp);
+ } else if (is_int8(disp)) {
+ set_modrm(1, rsp);
+ set_disp8(disp);
+ } else {
+ set_modrm(2, rsp);
+ set_disp32(disp);
+ }
+ }
-Operand::Operand(Register base,
- Register index,
- ScaleFactor scale,
- int32_t disp) : rex_(0) {
- DCHECK(index != rsp);
- len_ = 1;
- set_sib(scale, index, base);
- if (disp == 0 && base != rbp && base != r13) {
- // This call to set_modrm doesn't overwrite the REX.B (or REX.X) bits
- // possibly set by set_sib.
+ OperandBuilder(Register index, ScaleFactor scale, int32_t disp) {
+ DCHECK(index != rsp);
set_modrm(0, rsp);
- } else if (is_int8(disp)) {
- set_modrm(1, rsp);
- set_disp8(disp);
- } else {
- set_modrm(2, rsp);
+ set_sib(scale, index, rbp);
set_disp32(disp);
}
-}
+ OperandBuilder(Label* label, int addend) {
+ data_.addend = addend;
+ DCHECK_NOT_NULL(label);
+ DCHECK(addend == 0 || (is_int8(addend) && label->is_bound()));
+ set_modrm(0, rbp);
+ set_disp64(reinterpret_cast<intptr_t>(label));
+ }
+
+ OperandBuilder(Operand operand, int32_t offset) {
+ DCHECK_GE(operand.data().len, 1);
+ // Operand encodes REX ModR/M [SIB] [Disp].
+ byte modrm = operand.data().buf[0];
+ DCHECK_LT(modrm, 0xC0); // Disallow mode 3 (register target).
+ bool has_sib = ((modrm & 0x07) == 0x04);
+ byte mode = modrm & 0xC0;
+ int disp_offset = has_sib ? 2 : 1;
+ int base_reg = (has_sib ? operand.data().buf[1] : modrm) & 0x07;
+ // Mode 0 with rbp/r13 as ModR/M or SIB base register always has a 32-bit
+ // displacement.
+ bool is_baseless =
+ (mode == 0) && (base_reg == 0x05); // No base or RIP base.
+ int32_t disp_value = 0;
+ if (mode == 0x80 || is_baseless) {
+ // Mode 2 or mode 0 with rbp/r13 as base: Word displacement.
+ disp_value = *bit_cast<const int32_t*>(&operand.data().buf[disp_offset]);
+ } else if (mode == 0x40) {
+ // Mode 1: Byte displacement.
+ disp_value = static_cast<signed char>(operand.data().buf[disp_offset]);
+ }
-Operand::Operand(Register index,
- ScaleFactor scale,
- int32_t disp) : rex_(0) {
- DCHECK(index != rsp);
- len_ = 1;
- set_modrm(0, rsp);
- set_sib(scale, index, rbp);
- set_disp32(disp);
-}
-
-
-Operand::Operand(Label* label) : rex_(0), len_(1) {
- DCHECK_NOT_NULL(label);
- set_modrm(0, rbp);
- set_disp64(reinterpret_cast<intptr_t>(label));
-}
-
-
-Operand::Operand(const Operand& operand, int32_t offset) {
- DCHECK_GE(operand.len_, 1);
- // Operand encodes REX ModR/M [SIB] [Disp].
- byte modrm = operand.buf_[0];
- DCHECK_LT(modrm, 0xC0); // Disallow mode 3 (register target).
- bool has_sib = ((modrm & 0x07) == 0x04);
- byte mode = modrm & 0xC0;
- int disp_offset = has_sib ? 2 : 1;
- int base_reg = (has_sib ? operand.buf_[1] : modrm) & 0x07;
- // Mode 0 with rbp/r13 as ModR/M or SIB base register always has a 32-bit
- // displacement.
- bool is_baseless = (mode == 0) && (base_reg == 0x05); // No base or RIP base.
- int32_t disp_value = 0;
- if (mode == 0x80 || is_baseless) {
- // Mode 2 or mode 0 with rbp/r13 as base: Word displacement.
- disp_value = *bit_cast<const int32_t*>(&operand.buf_[disp_offset]);
- } else if (mode == 0x40) {
- // Mode 1: Byte displacement.
- disp_value = static_cast<signed char>(operand.buf_[disp_offset]);
+ // Write new operand with same registers, but with modified displacement.
+ DCHECK(offset >= 0 ? disp_value + offset > disp_value
+ : disp_value + offset < disp_value); // No overflow.
+ disp_value += offset;
+ data_.rex = operand.data().rex;
+ if (!is_int8(disp_value) || is_baseless) {
+ // Need 32 bits of displacement, mode 2 or mode 1 with register rbp/r13.
+ data_.buf[0] = (modrm & 0x3F) | (is_baseless ? 0x00 : 0x80);
+ data_.len = disp_offset + 4;
+ Memory::int32_at(&data_.buf[disp_offset]) = disp_value;
+ } else if (disp_value != 0 || (base_reg == 0x05)) {
+ // Need 8 bits of displacement.
+ data_.buf[0] = (modrm & 0x3F) | 0x40; // Mode 1.
+ data_.len = disp_offset + 1;
+ data_.buf[disp_offset] = static_cast<byte>(disp_value);
+ } else {
+ // Need no displacement.
+ data_.buf[0] = (modrm & 0x3F); // Mode 0.
+ data_.len = disp_offset;
+ }
+ if (has_sib) {
+ data_.buf[1] = operand.data().buf[1];
+ }
}
- // Write new operand with same registers, but with modified displacement.
- DCHECK(offset >= 0 ? disp_value + offset > disp_value
- : disp_value + offset < disp_value); // No overflow.
- disp_value += offset;
- rex_ = operand.rex_;
- if (!is_int8(disp_value) || is_baseless) {
- // Need 32 bits of displacement, mode 2 or mode 1 with register rbp/r13.
- buf_[0] = (modrm & 0x3F) | (is_baseless ? 0x00 : 0x80);
- len_ = disp_offset + 4;
- Memory::int32_at(&buf_[disp_offset]) = disp_value;
- } else if (disp_value != 0 || (base_reg == 0x05)) {
- // Need 8 bits of displacement.
- buf_[0] = (modrm & 0x3F) | 0x40; // Mode 1.
- len_ = disp_offset + 1;
- buf_[disp_offset] = static_cast<byte>(disp_value);
- } else {
- // Need no displacement.
- buf_[0] = (modrm & 0x3F); // Mode 0.
- len_ = disp_offset;
+ void set_modrm(int mod, Register rm_reg) {
+ DCHECK(is_uint2(mod));
+ data_.buf[0] = mod << 6 | rm_reg.low_bits();
+ // Set REX.B to the high bit of rm.code().
+ data_.rex |= rm_reg.high_bit();
}
- if (has_sib) {
- buf_[1] = operand.buf_[1];
+
+ void set_sib(ScaleFactor scale, Register index, Register base) {
+ DCHECK_EQ(data_.len, 1);
+ DCHECK(is_uint2(scale));
+ // Use SIB with no index register only for base rsp or r12. Otherwise we
+ // would skip the SIB byte entirely.
+ DCHECK(index != rsp || base == rsp || base == r12);
+ data_.buf[1] = (scale << 6) | (index.low_bits() << 3) | base.low_bits();
+ data_.rex |= index.high_bit() << 1 | base.high_bit();
+ data_.len = 2;
}
-}
+ void set_disp8(int disp) {
+ DCHECK(is_int8(disp));
+ DCHECK(data_.len == 1 || data_.len == 2);
+ int8_t* p = reinterpret_cast<int8_t*>(&data_.buf[data_.len]);
+ *p = disp;
+ data_.len += sizeof(int8_t);
+ }
+
+ void set_disp32(int disp) {
+ DCHECK(data_.len == 1 || data_.len == 2);
+ int32_t* p = reinterpret_cast<int32_t*>(&data_.buf[data_.len]);
+ *p = disp;
+ data_.len += sizeof(int32_t);
+ }
+
+ void set_disp64(int64_t disp) {
+ DCHECK_EQ(1, data_.len);
+ int64_t* p = reinterpret_cast<int64_t*>(&data_.buf[data_.len]);
+ *p = disp;
+ data_.len += sizeof(disp);
+ }
+
+ const Operand::Data& data() const { return data_; }
+
+ private:
+ Operand::Data data_;
+};
+} // namespace
+
+Operand::Operand(Register base, int32_t disp)
+ : data_(OperandBuilder(base, disp).data()) {}
+
+Operand::Operand(Register base, Register index, ScaleFactor scale, int32_t disp)
+ : data_(OperandBuilder(base, index, scale, disp).data()) {}
+
+Operand::Operand(Register index, ScaleFactor scale, int32_t disp)
+ : data_(OperandBuilder(index, scale, disp).data()) {}
+
+Operand::Operand(Label* label, int addend)
+ : data_(OperandBuilder(label, addend).data()) {}
+
+Operand::Operand(Operand operand, int32_t offset)
+ : data_(OperandBuilder(operand, offset).data()) {}
bool Operand::AddressUsesRegister(Register reg) const {
int code = reg.code();
- DCHECK_NE(buf_[0] & 0xC0, 0xC0); // Always a memory operand.
- // Start with only low three bits of base register. Initial decoding doesn't
- // distinguish on the REX.B bit.
- int base_code = buf_[0] & 0x07;
+ DCHECK_NE(data_.buf[0] & 0xC0, 0xC0); // Always a memory operand.
+ // Start with only low three bits of base register. Initial decoding
+ // doesn't distinguish on the REX.B bit.
+ int base_code = data_.buf[0] & 0x07;
if (base_code == rsp.code()) {
// SIB byte present in buf_[1].
// Check the index register from the SIB byte + REX.X prefix.
- int index_code = ((buf_[1] >> 3) & 0x07) | ((rex_ & 0x02) << 2);
+ int index_code = ((data_.buf[1] >> 3) & 0x07) | ((data_.rex & 0x02) << 2);
// Index code (including REX.X) of 0x04 (rsp) means no index register.
if (index_code != rsp.code() && index_code == code) return true;
// Add REX.B to get the full base register code.
- base_code = (buf_[1] & 0x07) | ((rex_ & 0x01) << 3);
+ base_code = (data_.buf[1] & 0x07) | ((data_.rex & 0x01) << 3);
// A base register of 0x05 (rbp) with mod = 0 means no base register.
- if (base_code == rbp.code() && ((buf_[0] & 0xC0) == 0)) return false;
+ if (base_code == rbp.code() && ((data_.buf[0] & 0xC0) == 0)) return false;
return code == base_code;
} else {
// A base register with low bits of 0x05 (rbp or r13) and mod = 0 means
// no base register.
- if (base_code == rbp.code() && ((buf_[0] & 0xC0) == 0)) return false;
- base_code |= ((rex_ & 0x01) << 3);
+ if (base_code == rbp.code() && ((data_.buf[0] & 0xC0) == 0)) return false;
+ base_code |= ((data_.rex & 0x01) << 3);
return code == base_code;
}
}
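The refactoring above makes Operand a small value type: the ModR/M, SIB and displacement bytes are assembled by an OperandBuilder in an anonymous namespace, and each public constructor only copies the finished Data out of a builder. A minimal sketch of that shape follows; OperandSketch, its Data layout and the single constructor are illustrative, not the actual x64 types, and the rbp/r13 and SIB special cases are omitted.

#include <cstdint>

class OperandSketch {
 public:
  // Plain-old-data payload, small enough to pass OperandSketch by value
  // instead of by const reference.
  struct Data {
    uint8_t rex = 0;
    uint8_t buf[9] = {};
    uint8_t len = 1;     // number of bytes of buf in use
    int8_t addend = 0;   // only used for label-relative operands
  };

  OperandSketch(int base_low_bits, int32_t disp);  // defined via the builder

  const Data& data() const { return data_; }

 private:
  Data data_;
};

namespace {
// All encoding decisions happen here; the constructor above only copies the
// result, so OperandSketch itself stays trivially copyable.
class OperandBuilderSketch {
 public:
  OperandBuilderSketch(int base_low_bits, int32_t disp) {
    if (disp == 0) {
      data_.buf[0] = static_cast<uint8_t>(base_low_bits);         // mod 0
    } else if (disp >= -128 && disp <= 127) {
      data_.buf[0] = static_cast<uint8_t>(0x40 | base_low_bits);  // mod 1
      data_.buf[data_.len++] = static_cast<uint8_t>(disp);
    } else {
      data_.buf[0] = static_cast<uint8_t>(0x80 | base_low_bits);  // mod 2
      for (int i = 0; i < 4; ++i) {
        data_.buf[data_.len++] = static_cast<uint8_t>(disp >> (8 * i));
      }
    }
  }
  const OperandSketch::Data& data() const { return data_; }

 private:
  OperandSketch::Data data_;
};
}  // namespace

OperandSketch::OperandSketch(int base_low_bits, int32_t disp)
    : data_(OperandBuilderSketch(base_low_bits, disp).data()) {}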
@@ -527,22 +583,22 @@ void Assembler::GrowBuffer() {
DCHECK(!buffer_overflow());
}
-
-void Assembler::emit_operand(int code, const Operand& adr) {
+void Assembler::emit_operand(int code, Operand adr) {
DCHECK(is_uint3(code));
- const unsigned length = adr.len_;
+ const unsigned length = adr.data().len;
DCHECK_GT(length, 0);
// Emit updated ModR/M byte containing the given register.
- DCHECK_EQ(adr.buf_[0] & 0x38, 0);
- *pc_++ = adr.buf_[0] | code << 3;
+ DCHECK_EQ(adr.data().buf[0] & 0x38, 0);
+ *pc_++ = adr.data().buf[0] | code << 3;
// Recognize RIP relative addressing.
- if (adr.buf_[0] == 5) {
+ if (adr.data().buf[0] == 5) {
DCHECK_EQ(9u, length);
- Label* label = *bit_cast<Label* const*>(&adr.buf_[1]);
+ Label* label = *bit_cast<Label* const*>(&adr.data().buf[1]);
if (label->is_bound()) {
- int offset = label->pos() - pc_offset() - sizeof(int32_t);
+ int offset =
+ label->pos() - pc_offset() - sizeof(int32_t) + adr.data().addend;
DCHECK_GE(0, offset);
emitl(offset);
} else if (label->is_linked()) {
@@ -556,17 +612,14 @@ void Assembler::emit_operand(int code, const Operand& adr) {
}
} else {
// Emit the rest of the encoded operand.
- for (unsigned i = 1; i < length; i++) *pc_++ = adr.buf_[i];
+ for (unsigned i = 1; i < length; i++) *pc_++ = adr.data().buf[i];
}
}
// Assembler Instruction implementations.
-void Assembler::arithmetic_op(byte opcode,
- Register reg,
- const Operand& op,
- int size) {
+void Assembler::arithmetic_op(byte opcode, Register reg, Operand op, int size) {
EnsureSpace ensure_space(this);
emit_rex(reg, op, size);
emit(opcode);
@@ -610,10 +663,7 @@ void Assembler::arithmetic_op_16(byte opcode, Register reg, Register rm_reg) {
}
}
-
-void Assembler::arithmetic_op_16(byte opcode,
- Register reg,
- const Operand& rm_reg) {
+void Assembler::arithmetic_op_16(byte opcode, Register reg, Operand rm_reg) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(reg, rm_reg);
@@ -621,8 +671,7 @@ void Assembler::arithmetic_op_16(byte opcode,
emit_operand(reg, rm_reg);
}
-
-void Assembler::arithmetic_op_8(byte opcode, Register reg, const Operand& op) {
+void Assembler::arithmetic_op_8(byte opcode, Register reg, Operand op) {
EnsureSpace ensure_space(this);
if (!reg.is_byte_register()) {
emit_rex_32(reg, op);
@@ -676,10 +725,8 @@ void Assembler::immediate_arithmetic_op(byte subcode,
}
}
-void Assembler::immediate_arithmetic_op(byte subcode,
- const Operand& dst,
- Immediate src,
- int size) {
+void Assembler::immediate_arithmetic_op(byte subcode, Operand dst,
+ Immediate src, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, size);
if (is_int8(src.value_) && RelocInfo::IsNone(src.rmode_)) {
@@ -714,9 +761,7 @@ void Assembler::immediate_arithmetic_op_16(byte subcode,
}
}
-
-void Assembler::immediate_arithmetic_op_16(byte subcode,
- const Operand& dst,
+void Assembler::immediate_arithmetic_op_16(byte subcode, Operand dst,
Immediate src) {
EnsureSpace ensure_space(this);
emit(0x66); // Operand size override prefix.
@@ -732,9 +777,7 @@ void Assembler::immediate_arithmetic_op_16(byte subcode,
}
}
-
-void Assembler::immediate_arithmetic_op_8(byte subcode,
- const Operand& dst,
+void Assembler::immediate_arithmetic_op_8(byte subcode, Operand dst,
Immediate src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst);
@@ -813,8 +856,7 @@ void Assembler::shift(Operand dst, int subcode, int size) {
emit_operand(subcode, dst);
}
-
-void Assembler::bt(const Operand& dst, Register src) {
+void Assembler::bt(Operand dst, Register src) {
EnsureSpace ensure_space(this);
emit_rex_64(src, dst);
emit(0x0F);
@@ -822,8 +864,7 @@ void Assembler::bt(const Operand& dst, Register src) {
emit_operand(src, dst);
}
-
-void Assembler::bts(const Operand& dst, Register src) {
+void Assembler::bts(Operand dst, Register src) {
EnsureSpace ensure_space(this);
emit_rex_64(src, dst);
emit(0x0F);
@@ -840,8 +881,7 @@ void Assembler::bsrl(Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::bsrl(Register dst, const Operand& src) {
+void Assembler::bsrl(Register dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -858,8 +898,7 @@ void Assembler::bsrq(Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::bsrq(Register dst, const Operand& src) {
+void Assembler::bsrq(Register dst, Operand src) {
EnsureSpace ensure_space(this);
emit_rex_64(dst, src);
emit(0x0F);
@@ -876,8 +915,7 @@ void Assembler::bsfl(Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::bsfl(Register dst, const Operand& src) {
+void Assembler::bsfl(Register dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -894,8 +932,7 @@ void Assembler::bsfq(Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::bsfq(Register dst, const Operand& src) {
+void Assembler::bsfq(Register dst, Operand src) {
EnsureSpace ensure_space(this);
emit_rex_64(dst, src);
emit(0x0F);
@@ -912,7 +949,7 @@ void Assembler::pshufw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
emit(shuffle);
}
-void Assembler::pshufw(XMMRegister dst, const Operand& src, uint8_t shuffle) {
+void Assembler::pshufw(XMMRegister dst, Operand src, uint8_t shuffle) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -990,8 +1027,7 @@ void Assembler::call(Register adr) {
emit_modrm(0x2, adr);
}
-
-void Assembler::call(const Operand& op) {
+void Assembler::call(Operand op) {
EnsureSpace ensure_space(this);
// Opcode: FF /2 m64.
emit_optional_rex_32(op);
@@ -1049,8 +1085,7 @@ void Assembler::cmovq(Condition cc, Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::cmovq(Condition cc, Register dst, const Operand& src) {
+void Assembler::cmovq(Condition cc, Register dst, Operand src) {
if (cc == always) {
movq(dst, src);
} else if (cc == never) {
@@ -1081,8 +1116,7 @@ void Assembler::cmovl(Condition cc, Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::cmovl(Condition cc, Register dst, const Operand& src) {
+void Assembler::cmovl(Condition cc, Register dst, Operand src) {
if (cc == always) {
movl(dst, src);
} else if (cc == never) {
@@ -1110,7 +1144,7 @@ void Assembler::lock() {
emit(0xF0);
}
-void Assembler::cmpxchgb(const Operand& dst, Register src) {
+void Assembler::cmpxchgb(Operand dst, Register src) {
EnsureSpace ensure_space(this);
if (!src.is_byte_register()) {
// Register is not one of al, bl, cl, dl. Its encoding needs REX.
@@ -1123,7 +1157,7 @@ void Assembler::cmpxchgb(const Operand& dst, Register src) {
emit_operand(src, dst);
}
-void Assembler::cmpxchgw(const Operand& dst, Register src) {
+void Assembler::cmpxchgw(Operand dst, Register src) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(src, dst);
@@ -1132,7 +1166,7 @@ void Assembler::cmpxchgw(const Operand& dst, Register src) {
emit_operand(src, dst);
}
-void Assembler::emit_cmpxchg(const Operand& dst, Register src, int size) {
+void Assembler::emit_cmpxchg(Operand dst, Register src, int size) {
EnsureSpace ensure_space(this);
emit_rex(src, dst, size);
emit(0x0F);
@@ -1168,8 +1202,7 @@ void Assembler::emit_dec(Register dst, int size) {
emit_modrm(0x1, dst);
}
-
-void Assembler::emit_dec(const Operand& dst, int size) {
+void Assembler::emit_dec(Operand dst, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, size);
emit(0xFF);
@@ -1187,8 +1220,7 @@ void Assembler::decb(Register dst) {
emit_modrm(0x1, dst);
}
-
-void Assembler::decb(const Operand& dst) {
+void Assembler::decb(Operand dst) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst);
emit(0xFE);
@@ -1233,8 +1265,7 @@ void Assembler::emit_imul(Register src, int size) {
emit_modrm(0x5, src);
}
-
-void Assembler::emit_imul(const Operand& src, int size) {
+void Assembler::emit_imul(Operand src, int size) {
EnsureSpace ensure_space(this);
emit_rex(src, size);
emit(0xF7);
@@ -1250,8 +1281,7 @@ void Assembler::emit_imul(Register dst, Register src, int size) {
emit_modrm(dst, src);
}
-
-void Assembler::emit_imul(Register dst, const Operand& src, int size) {
+void Assembler::emit_imul(Register dst, Operand src, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, src, size);
emit(0x0F);
@@ -1274,9 +1304,7 @@ void Assembler::emit_imul(Register dst, Register src, Immediate imm, int size) {
}
}
-
-void Assembler::emit_imul(Register dst, const Operand& src, Immediate imm,
- int size) {
+void Assembler::emit_imul(Register dst, Operand src, Immediate imm, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, src, size);
if (is_int8(imm.value_)) {
@@ -1298,8 +1326,7 @@ void Assembler::emit_inc(Register dst, int size) {
emit_modrm(0x0, dst);
}
-
-void Assembler::emit_inc(const Operand& dst, int size) {
+void Assembler::emit_inc(Operand dst, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, size);
emit(0xFF);
@@ -1483,8 +1510,7 @@ void Assembler::jmp(Register target) {
emit_modrm(0x4, target);
}
-
-void Assembler::jmp(const Operand& src) {
+void Assembler::jmp(Operand src) {
EnsureSpace ensure_space(this);
// Opcode FF/4 m64.
emit_optional_rex_32(src);
@@ -1492,8 +1518,7 @@ void Assembler::jmp(const Operand& src) {
emit_operand(0x4, src);
}
-
-void Assembler::emit_lea(Register dst, const Operand& src, int size) {
+void Assembler::emit_lea(Register dst, Operand src, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, src, size);
emit(0x8D);
@@ -1529,8 +1554,7 @@ void Assembler::leave() {
emit(0xC9);
}
-
-void Assembler::movb(Register dst, const Operand& src) {
+void Assembler::movb(Register dst, Operand src) {
EnsureSpace ensure_space(this);
if (!dst.is_byte_register()) {
// Register is not one of al, bl, cl, dl. Its encoding needs REX.
@@ -1553,8 +1577,7 @@ void Assembler::movb(Register dst, Immediate imm) {
emit(imm.value_);
}
-
-void Assembler::movb(const Operand& dst, Register src) {
+void Assembler::movb(Operand dst, Register src) {
EnsureSpace ensure_space(this);
if (!src.is_byte_register()) {
// Register is not one of al, bl, cl, dl. Its encoding needs REX.
@@ -1566,8 +1589,7 @@ void Assembler::movb(const Operand& dst, Register src) {
emit_operand(src, dst);
}
-
-void Assembler::movb(const Operand& dst, Immediate imm) {
+void Assembler::movb(Operand dst, Immediate imm) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst);
emit(0xC6);
@@ -1575,8 +1597,7 @@ void Assembler::movb(const Operand& dst, Immediate imm) {
emit(static_cast<byte>(imm.value_));
}
-
-void Assembler::movw(Register dst, const Operand& src) {
+void Assembler::movw(Register dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -1584,8 +1605,7 @@ void Assembler::movw(Register dst, const Operand& src) {
emit_operand(dst, src);
}
-
-void Assembler::movw(const Operand& dst, Register src) {
+void Assembler::movw(Operand dst, Register src) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(src, dst);
@@ -1593,8 +1613,7 @@ void Assembler::movw(const Operand& dst, Register src) {
emit_operand(src, dst);
}
-
-void Assembler::movw(const Operand& dst, Immediate imm) {
+void Assembler::movw(Operand dst, Immediate imm) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst);
@@ -1604,8 +1623,7 @@ void Assembler::movw(const Operand& dst, Immediate imm) {
emit(static_cast<byte>(imm.value_ >> 8));
}
-
-void Assembler::emit_mov(Register dst, const Operand& src, int size) {
+void Assembler::emit_mov(Register dst, Operand src, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, src, size);
emit(0x8B);
@@ -1626,8 +1644,7 @@ void Assembler::emit_mov(Register dst, Register src, int size) {
}
}
-
-void Assembler::emit_mov(const Operand& dst, Register src, int size) {
+void Assembler::emit_mov(Operand dst, Register src, int size) {
EnsureSpace ensure_space(this);
emit_rex(src, dst, size);
emit(0x89);
@@ -1648,8 +1665,7 @@ void Assembler::emit_mov(Register dst, Immediate value, int size) {
emit(value);
}
-
-void Assembler::emit_mov(const Operand& dst, Immediate value, int size) {
+void Assembler::emit_mov(Operand dst, Immediate value, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, size);
emit(0xC7);
@@ -1689,7 +1705,7 @@ void Assembler::movq(Register dst, uint64_t value, RelocInfo::Mode rmode) {
// Loads the ip-relative location of the src label into the target location
// (as a 32-bit offset sign extended to 64-bit).
-void Assembler::movl(const Operand& dst, Label* src) {
+void Assembler::movl(Operand dst, Label* src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst);
emit(0xC7);
@@ -1723,8 +1739,7 @@ void Assembler::movsxbl(Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::movsxbl(Register dst, const Operand& src) {
+void Assembler::movsxbl(Register dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -1732,8 +1747,7 @@ void Assembler::movsxbl(Register dst, const Operand& src) {
emit_operand(dst, src);
}
-
-void Assembler::movsxbq(Register dst, const Operand& src) {
+void Assembler::movsxbq(Register dst, Operand src) {
EnsureSpace ensure_space(this);
emit_rex_64(dst, src);
emit(0x0F);
@@ -1757,8 +1771,7 @@ void Assembler::movsxwl(Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::movsxwl(Register dst, const Operand& src) {
+void Assembler::movsxwl(Register dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -1766,8 +1779,7 @@ void Assembler::movsxwl(Register dst, const Operand& src) {
emit_operand(dst, src);
}
-
-void Assembler::movsxwq(Register dst, const Operand& src) {
+void Assembler::movsxwq(Register dst, Operand src) {
EnsureSpace ensure_space(this);
emit_rex_64(dst, src);
emit(0x0F);
@@ -1790,16 +1802,14 @@ void Assembler::movsxlq(Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::movsxlq(Register dst, const Operand& src) {
+void Assembler::movsxlq(Register dst, Operand src) {
EnsureSpace ensure_space(this);
emit_rex_64(dst, src);
emit(0x63);
emit_operand(dst, src);
}
-
-void Assembler::emit_movzxb(Register dst, const Operand& src, int size) {
+void Assembler::emit_movzxb(Register dst, Operand src, int size) {
EnsureSpace ensure_space(this);
// 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
// there is no need to make this a 64 bit operation.
@@ -1825,8 +1835,7 @@ void Assembler::emit_movzxb(Register dst, Register src, int size) {
emit_modrm(dst, src);
}
-
-void Assembler::emit_movzxw(Register dst, const Operand& src, int size) {
+void Assembler::emit_movzxw(Register dst, Operand src, int size) {
EnsureSpace ensure_space(this);
// 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
// there is no need to make this a 64 bit operation.
@@ -1878,8 +1887,7 @@ void Assembler::mull(Register src) {
emit_modrm(0x4, src);
}
-
-void Assembler::mull(const Operand& src) {
+void Assembler::mull(Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(src);
emit(0xF7);
@@ -1902,8 +1910,7 @@ void Assembler::emit_neg(Register dst, int size) {
emit_modrm(0x3, dst);
}
-
-void Assembler::emit_neg(const Operand& dst, int size) {
+void Assembler::emit_neg(Operand dst, int size) {
EnsureSpace ensure_space(this);
emit_rex_64(dst);
emit(0xF7);
@@ -1924,8 +1931,7 @@ void Assembler::emit_not(Register dst, int size) {
emit_modrm(0x2, dst);
}
-
-void Assembler::emit_not(const Operand& dst, int size) {
+void Assembler::emit_not(Operand dst, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, size);
emit(0xF7);
@@ -1953,6 +1959,7 @@ void Assembler::Nop(int n) {
switch (n) {
case 2:
emit(0x66);
+ V8_FALLTHROUGH;
case 1:
emit(0x90);
return;
@@ -1969,6 +1976,7 @@ void Assembler::Nop(int n) {
return;
case 6:
emit(0x66);
+ V8_FALLTHROUGH;
case 5:
emit(0x0F);
emit(0x1F);
@@ -1989,12 +1997,15 @@ void Assembler::Nop(int n) {
case 11:
emit(0x66);
n--;
+ V8_FALLTHROUGH;
case 10:
emit(0x66);
n--;
+ V8_FALLTHROUGH;
case 9:
emit(0x66);
n--;
+ V8_FALLTHROUGH;
case 8:
emit(0x0F);
emit(0x1F);
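The V8_FALLTHROUGH markers added above annotate deliberate case fallthrough so that compilers which warn on implicit fallthrough stay quiet. A minimal sketch of such a marker and its use; the macro definition here is an assumption for illustration, not V8's actual one.

// Illustrative fallthrough annotation; projects typically map it to
// [[fallthrough]] (C++17) or a compiler-specific attribute, else to nothing.
#if defined(__cplusplus) && __cplusplus >= 201703L
#define FALLTHROUGH_SKETCH [[fallthrough]]
#else
#define FALLTHROUGH_SKETCH
#endif

// How many 0x66 prefix bytes a nop of length n starts with in the code
// above: cases 11, 10 and 9 each emit one prefix and deliberately fall
// through towards the 8-byte core.
int LeadingPrefixBytes(int n) {
  int prefixes = 0;
  switch (n) {
    case 11:
      ++prefixes;
      FALLTHROUGH_SKETCH;
    case 10:
      ++prefixes;
      FALLTHROUGH_SKETCH;
    case 9:
      ++prefixes;
      FALLTHROUGH_SKETCH;
    case 8:
      return prefixes;
    default:
      return 0;
  }
}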
@@ -2016,8 +2027,7 @@ void Assembler::popq(Register dst) {
emit(0x58 | dst.low_bits());
}
-
-void Assembler::popq(const Operand& dst) {
+void Assembler::popq(Operand dst) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst);
emit(0x8F);
@@ -2037,8 +2047,7 @@ void Assembler::pushq(Register src) {
emit(0x50 | src.low_bits());
}
-
-void Assembler::pushq(const Operand& src) {
+void Assembler::pushq(Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(src);
emit(0xFF);
@@ -2125,7 +2134,7 @@ void Assembler::shrd(Register dst, Register src) {
emit_modrm(src, dst);
}
-void Assembler::xchgb(Register reg, const Operand& op) {
+void Assembler::xchgb(Register reg, Operand op) {
EnsureSpace ensure_space(this);
if (!reg.is_byte_register()) {
// Register is not one of al, bl, cl, dl. Its encoding needs REX.
@@ -2137,7 +2146,7 @@ void Assembler::xchgb(Register reg, const Operand& op) {
emit_operand(reg, op);
}
-void Assembler::xchgw(Register reg, const Operand& op) {
+void Assembler::xchgw(Register reg, Operand op) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(reg, op);
@@ -2162,8 +2171,7 @@ void Assembler::emit_xchg(Register dst, Register src, int size) {
}
}
-
-void Assembler::emit_xchg(Register dst, const Operand& src, int size) {
+void Assembler::emit_xchg(Register dst, Operand src, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, src, size);
emit(0x87);
@@ -2193,6 +2201,12 @@ void Assembler::store_rax(ExternalReference ref) {
store_rax(ref.address(), RelocInfo::EXTERNAL_REFERENCE);
}
+void Assembler::sub_sp_32(uint32_t imm) {
+ emit_rex_64();
+ emit(0x81); // using a literal 32-bit immediate.
+ emit_modrm(0x5, rsp);
+ emitl(imm);
+}
void Assembler::testb(Register dst, Register src) {
EnsureSpace ensure_space(this);
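The new sub_sp_32 above always emits the long form of the subtraction: a REX.W prefix, opcode 0x81 with /5 in the ModR/M byte, and a full 32-bit immediate, i.e. sub rsp, imm32. A standalone sketch of the same byte sequence; the helper is illustrative, not part of the assembler.

#include <cstdint>
#include <vector>

// Bytes emitted for sub rsp, imm32:
//   0x48     REX.W (64-bit operand size)
//   0x81     sub r/m64, imm32 (subcode /5 in ModR/M)
//   0xEC     ModR/M: mod=11, reg=/5, rm=rsp
//   imm32    little-endian immediate
std::vector<uint8_t> EncodeSubRspImm32(uint32_t imm) {
  std::vector<uint8_t> code = {0x48, 0x81, 0xEC};
  for (int i = 0; i < 4; ++i) {
    code.push_back(static_cast<uint8_t>(imm >> (8 * i)));
  }
  return code;
}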
@@ -2204,13 +2218,12 @@ void Assembler::testb(Register reg, Immediate mask) {
emit_test(reg, mask, sizeof(int8_t));
}
-void Assembler::testb(const Operand& op, Immediate mask) {
+void Assembler::testb(Operand op, Immediate mask) {
DCHECK(is_int8(mask.value_) || is_uint8(mask.value_));
emit_test(op, mask, sizeof(int8_t));
}
-
-void Assembler::testb(const Operand& op, Register reg) {
+void Assembler::testb(Operand op, Register reg) {
emit_test(op, reg, sizeof(int8_t));
}
@@ -2222,11 +2235,11 @@ void Assembler::testw(Register reg, Immediate mask) {
emit_test(reg, mask, sizeof(int16_t));
}
-void Assembler::testw(const Operand& op, Immediate mask) {
+void Assembler::testw(Operand op, Immediate mask) {
emit_test(op, mask, sizeof(int16_t));
}
-void Assembler::testw(const Operand& op, Register reg) {
+void Assembler::testw(Operand op, Register reg) {
emit_test(op, reg, sizeof(int16_t));
}
@@ -2285,7 +2298,7 @@ void Assembler::emit_test(Register reg, Immediate mask, int size) {
}
}
-void Assembler::emit_test(const Operand& op, Immediate mask, int size) {
+void Assembler::emit_test(Operand op, Immediate mask, int size) {
if (is_uint8(mask.value_)) {
size = sizeof(int8_t);
} else if (is_uint16(mask.value_)) {
@@ -2313,7 +2326,7 @@ void Assembler::emit_test(const Operand& op, Immediate mask, int size) {
}
}
-void Assembler::emit_test(const Operand& op, Register reg, int size) {
+void Assembler::emit_test(Operand op, Register reg, int size) {
EnsureSpace ensure_space(this);
if (size == sizeof(int16_t)) {
emit(0x66);
@@ -2372,32 +2385,28 @@ void Assembler::fldln2() {
emit(0xED);
}
-
-void Assembler::fld_s(const Operand& adr) {
+void Assembler::fld_s(Operand adr) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(adr);
emit(0xD9);
emit_operand(0, adr);
}
-
-void Assembler::fld_d(const Operand& adr) {
+void Assembler::fld_d(Operand adr) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(adr);
emit(0xDD);
emit_operand(0, adr);
}
-
-void Assembler::fstp_s(const Operand& adr) {
+void Assembler::fstp_s(Operand adr) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(adr);
emit(0xD9);
emit_operand(3, adr);
}
-
-void Assembler::fstp_d(const Operand& adr) {
+void Assembler::fstp_d(Operand adr) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(adr);
emit(0xDD);
@@ -2411,32 +2420,28 @@ void Assembler::fstp(int index) {
emit_farith(0xDD, 0xD8, index);
}
-
-void Assembler::fild_s(const Operand& adr) {
+void Assembler::fild_s(Operand adr) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(adr);
emit(0xDB);
emit_operand(0, adr);
}
-
-void Assembler::fild_d(const Operand& adr) {
+void Assembler::fild_d(Operand adr) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(adr);
emit(0xDF);
emit_operand(5, adr);
}
-
-void Assembler::fistp_s(const Operand& adr) {
+void Assembler::fistp_s(Operand adr) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(adr);
emit(0xDB);
emit_operand(3, adr);
}
-
-void Assembler::fisttp_s(const Operand& adr) {
+void Assembler::fisttp_s(Operand adr) {
DCHECK(IsEnabled(SSE3));
EnsureSpace ensure_space(this);
emit_optional_rex_32(adr);
@@ -2444,8 +2449,7 @@ void Assembler::fisttp_s(const Operand& adr) {
emit_operand(1, adr);
}
-
-void Assembler::fisttp_d(const Operand& adr) {
+void Assembler::fisttp_d(Operand adr) {
DCHECK(IsEnabled(SSE3));
EnsureSpace ensure_space(this);
emit_optional_rex_32(adr);
@@ -2453,16 +2457,14 @@ void Assembler::fisttp_d(const Operand& adr) {
emit_operand(1, adr);
}
-
-void Assembler::fist_s(const Operand& adr) {
+void Assembler::fist_s(Operand adr) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(adr);
emit(0xDB);
emit_operand(2, adr);
}
-
-void Assembler::fistp_d(const Operand& adr) {
+void Assembler::fistp_d(Operand adr) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(adr);
emit(0xDF);
@@ -2544,8 +2546,7 @@ void Assembler::fsub(int i) {
emit_farith(0xDC, 0xE8, i);
}
-
-void Assembler::fisub_s(const Operand& adr) {
+void Assembler::fisub_s(Operand adr) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(adr);
emit(0xDA);
@@ -2723,8 +2724,7 @@ void Assembler::andps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::andps(XMMRegister dst, const Operand& src) {
+void Assembler::andps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2741,8 +2741,7 @@ void Assembler::orps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::orps(XMMRegister dst, const Operand& src) {
+void Assembler::orps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2760,8 +2759,7 @@ void Assembler::xorps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::xorps(XMMRegister dst, const Operand& src) {
+void Assembler::xorps(XMMRegister dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
@@ -2779,8 +2777,7 @@ void Assembler::addps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::addps(XMMRegister dst, const Operand& src) {
+void Assembler::addps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2797,8 +2794,7 @@ void Assembler::subps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::subps(XMMRegister dst, const Operand& src) {
+void Assembler::subps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2815,8 +2811,7 @@ void Assembler::mulps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::mulps(XMMRegister dst, const Operand& src) {
+void Assembler::mulps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2833,8 +2828,7 @@ void Assembler::divps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::divps(XMMRegister dst, const Operand& src) {
+void Assembler::divps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2855,8 +2849,7 @@ void Assembler::movd(XMMRegister dst, Register src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::movd(XMMRegister dst, const Operand& src) {
+void Assembler::movd(XMMRegister dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
@@ -2919,8 +2912,7 @@ void Assembler::movq(XMMRegister dst, XMMRegister src) {
}
}
-
-void Assembler::movdqa(const Operand& dst, XMMRegister src) {
+void Assembler::movdqa(Operand dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_rex_64(src, dst);
@@ -2929,8 +2921,7 @@ void Assembler::movdqa(const Operand& dst, XMMRegister src) {
emit_sse_operand(src, dst);
}
-
-void Assembler::movdqa(XMMRegister dst, const Operand& src) {
+void Assembler::movdqa(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_rex_64(dst, src);
@@ -2939,8 +2930,7 @@ void Assembler::movdqa(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::movdqu(const Operand& dst, XMMRegister src) {
+void Assembler::movdqu(Operand dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF3);
emit_rex_64(src, dst);
@@ -2949,8 +2939,7 @@ void Assembler::movdqu(const Operand& dst, XMMRegister src) {
emit_sse_operand(src, dst);
}
-
-void Assembler::movdqu(XMMRegister dst, const Operand& src) {
+void Assembler::movdqu(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF3);
emit_rex_64(dst, src);
@@ -2986,7 +2975,7 @@ void Assembler::pextrb(Register dst, XMMRegister src, int8_t imm8) {
emit(imm8);
}
-void Assembler::pextrb(const Operand& dst, XMMRegister src, int8_t imm8) {
+void Assembler::pextrb(Operand dst, XMMRegister src, int8_t imm8) {
DCHECK(IsEnabled(SSE4_1));
DCHECK(is_uint8(imm8));
EnsureSpace ensure_space(this);
@@ -3010,7 +2999,7 @@ void Assembler::pinsrw(XMMRegister dst, Register src, int8_t imm8) {
emit(imm8);
}
-void Assembler::pinsrw(XMMRegister dst, const Operand& src, int8_t imm8) {
+void Assembler::pinsrw(XMMRegister dst, Operand src, int8_t imm8) {
DCHECK(is_uint8(imm8));
EnsureSpace ensure_space(this);
emit(0x66);
@@ -3034,7 +3023,7 @@ void Assembler::pextrw(Register dst, XMMRegister src, int8_t imm8) {
emit(imm8);
}
-void Assembler::pextrw(const Operand& dst, XMMRegister src, int8_t imm8) {
+void Assembler::pextrw(Operand dst, XMMRegister src, int8_t imm8) {
DCHECK(IsEnabled(SSE4_1));
DCHECK(is_uint8(imm8));
EnsureSpace ensure_space(this);
@@ -3059,7 +3048,7 @@ void Assembler::pextrd(Register dst, XMMRegister src, int8_t imm8) {
emit(imm8);
}
-void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t imm8) {
+void Assembler::pextrd(Operand dst, XMMRegister src, int8_t imm8) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
emit(0x66);
@@ -3083,8 +3072,7 @@ void Assembler::pinsrd(XMMRegister dst, Register src, int8_t imm8) {
emit(imm8);
}
-
-void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
+void Assembler::pinsrd(XMMRegister dst, Operand src, int8_t imm8) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
emit(0x66);
@@ -3108,7 +3096,7 @@ void Assembler::pinsrb(XMMRegister dst, Register src, int8_t imm8) {
emit(imm8);
}
-void Assembler::pinsrb(XMMRegister dst, const Operand& src, int8_t imm8) {
+void Assembler::pinsrb(XMMRegister dst, Operand src, int8_t imm8) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
emit(0x66);
@@ -3133,7 +3121,7 @@ void Assembler::insertps(XMMRegister dst, XMMRegister src, byte imm8) {
emit(imm8);
}
-void Assembler::movsd(const Operand& dst, XMMRegister src) {
+void Assembler::movsd(Operand dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2); // double
@@ -3154,8 +3142,7 @@ void Assembler::movsd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::movsd(XMMRegister dst, const Operand& src) {
+void Assembler::movsd(XMMRegister dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2); // double
@@ -3214,8 +3201,7 @@ void Assembler::movapd(XMMRegister dst, XMMRegister src) {
}
}
-
-void Assembler::movupd(XMMRegister dst, const Operand& src) {
+void Assembler::movupd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -3224,7 +3210,7 @@ void Assembler::movupd(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-void Assembler::movupd(const Operand& dst, XMMRegister src) {
+void Assembler::movupd(Operand dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(src, dst);
@@ -3242,8 +3228,7 @@ void Assembler::addss(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::addss(XMMRegister dst, const Operand& src) {
+void Assembler::addss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF3);
emit_optional_rex_32(dst, src);
@@ -3262,8 +3247,7 @@ void Assembler::subss(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::subss(XMMRegister dst, const Operand& src) {
+void Assembler::subss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF3);
emit_optional_rex_32(dst, src);
@@ -3282,8 +3266,7 @@ void Assembler::mulss(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::mulss(XMMRegister dst, const Operand& src) {
+void Assembler::mulss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF3);
emit_optional_rex_32(dst, src);
@@ -3302,8 +3285,7 @@ void Assembler::divss(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::divss(XMMRegister dst, const Operand& src) {
+void Assembler::divss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF3);
emit_optional_rex_32(dst, src);
@@ -3322,8 +3304,7 @@ void Assembler::maxss(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::maxss(XMMRegister dst, const Operand& src) {
+void Assembler::maxss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF3);
emit_optional_rex_32(dst, src);
@@ -3342,8 +3323,7 @@ void Assembler::minss(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::minss(XMMRegister dst, const Operand& src) {
+void Assembler::minss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF3);
emit_optional_rex_32(dst, src);
@@ -3362,8 +3342,7 @@ void Assembler::sqrtss(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::sqrtss(XMMRegister dst, const Operand& src) {
+void Assembler::sqrtss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF3);
emit_optional_rex_32(dst, src);
@@ -3382,8 +3361,7 @@ void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::ucomiss(XMMRegister dst, const Operand& src) {
+void Assembler::ucomiss(XMMRegister dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
@@ -3403,8 +3381,7 @@ void Assembler::movss(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::movss(XMMRegister dst, const Operand& src) {
+void Assembler::movss(XMMRegister dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3); // single
@@ -3414,8 +3391,7 @@ void Assembler::movss(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::movss(const Operand& src, XMMRegister dst) {
+void Assembler::movss(Operand src, XMMRegister dst) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3); // single
@@ -3518,7 +3494,7 @@ void Assembler::cmpps(XMMRegister dst, XMMRegister src, int8_t cmp) {
emit(cmp);
}
-void Assembler::cmpps(XMMRegister dst, const Operand& src, int8_t cmp) {
+void Assembler::cmpps(XMMRegister dst, Operand src, int8_t cmp) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -3537,7 +3513,7 @@ void Assembler::cmppd(XMMRegister dst, XMMRegister src, int8_t cmp) {
emit(cmp);
}
-void Assembler::cmppd(XMMRegister dst, const Operand& src, int8_t cmp) {
+void Assembler::cmppd(XMMRegister dst, Operand src, int8_t cmp) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x66);
@@ -3547,7 +3523,7 @@ void Assembler::cmppd(XMMRegister dst, const Operand& src, int8_t cmp) {
emit(cmp);
}
-void Assembler::cvttss2si(Register dst, const Operand& src) {
+void Assembler::cvttss2si(Register dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -3568,8 +3544,7 @@ void Assembler::cvttss2si(Register dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::cvttsd2si(Register dst, const Operand& src) {
+void Assembler::cvttsd2si(Register dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -3601,8 +3576,7 @@ void Assembler::cvttss2siq(Register dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::cvttss2siq(Register dst, const Operand& src) {
+void Assembler::cvttss2siq(Register dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -3623,8 +3597,7 @@ void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::cvttsd2siq(Register dst, const Operand& src) {
+void Assembler::cvttsd2siq(Register dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -3634,8 +3607,7 @@ void Assembler::cvttsd2siq(Register dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::cvtlsi2sd(XMMRegister dst, const Operand& src) {
+void Assembler::cvtlsi2sd(XMMRegister dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -3656,8 +3628,7 @@ void Assembler::cvtlsi2sd(XMMRegister dst, Register src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::cvtlsi2ss(XMMRegister dst, const Operand& src) {
+void Assembler::cvtlsi2ss(XMMRegister dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -3677,8 +3648,7 @@ void Assembler::cvtlsi2ss(XMMRegister dst, Register src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::cvtqsi2ss(XMMRegister dst, const Operand& src) {
+void Assembler::cvtqsi2ss(XMMRegister dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -3699,8 +3669,7 @@ void Assembler::cvtqsi2ss(XMMRegister dst, Register src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::cvtqsi2sd(XMMRegister dst, const Operand& src) {
+void Assembler::cvtqsi2sd(XMMRegister dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -3732,8 +3701,7 @@ void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::cvtss2sd(XMMRegister dst, const Operand& src) {
+void Assembler::cvtss2sd(XMMRegister dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -3754,8 +3722,7 @@ void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::cvtsd2ss(XMMRegister dst, const Operand& src) {
+void Assembler::cvtsd2ss(XMMRegister dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -3797,8 +3764,7 @@ void Assembler::addsd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::addsd(XMMRegister dst, const Operand& src) {
+void Assembler::addsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3817,8 +3783,7 @@ void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::mulsd(XMMRegister dst, const Operand& src) {
+void Assembler::mulsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3837,8 +3802,7 @@ void Assembler::subsd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::subsd(XMMRegister dst, const Operand& src) {
+void Assembler::subsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3857,8 +3821,7 @@ void Assembler::divsd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::divsd(XMMRegister dst, const Operand& src) {
+void Assembler::divsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3877,8 +3840,7 @@ void Assembler::maxsd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::maxsd(XMMRegister dst, const Operand& src) {
+void Assembler::maxsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3897,8 +3859,7 @@ void Assembler::minsd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::minsd(XMMRegister dst, const Operand& src) {
+void Assembler::minsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3917,8 +3878,7 @@ void Assembler::andpd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::andpd(XMMRegister dst, const Operand& src) {
+void Assembler::andpd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -3937,8 +3897,7 @@ void Assembler::orpd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::orpd(XMMRegister dst, const Operand& src) {
+void Assembler::orpd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -3958,8 +3917,7 @@ void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::xorpd(XMMRegister dst, const Operand& src) {
+void Assembler::xorpd(XMMRegister dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
@@ -3980,8 +3938,7 @@ void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::sqrtsd(XMMRegister dst, const Operand& src) {
+void Assembler::sqrtsd(XMMRegister dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -4000,7 +3957,7 @@ void Assembler::haddps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-void Assembler::haddps(XMMRegister dst, const Operand& src) {
+void Assembler::haddps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -4019,8 +3976,7 @@ void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
+void Assembler::ucomisd(XMMRegister dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
@@ -4100,7 +4056,7 @@ void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-void Assembler::punpckldq(XMMRegister dst, const Operand& src) {
+void Assembler::punpckldq(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -4129,9 +4085,8 @@ void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1,
emit_sse_operand(dst, src2);
}
-
void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1,
- const Operand& src2) {
+ Operand src2) {
DCHECK(IsEnabled(FMA3));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, src1, src2, kLIG, k66, k0F38, kW1);
@@ -4149,9 +4104,8 @@ void Assembler::vfmass(byte op, XMMRegister dst, XMMRegister src1,
emit_sse_operand(dst, src2);
}
-
void Assembler::vfmass(byte op, XMMRegister dst, XMMRegister src1,
- const Operand& src2) {
+ Operand src2) {
DCHECK(IsEnabled(FMA3));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, src1, src2, kLIG, k66, k0F38, kW0);
@@ -4169,8 +4123,7 @@ void Assembler::vmovd(XMMRegister dst, Register src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::vmovd(XMMRegister dst, const Operand& src) {
+void Assembler::vmovd(XMMRegister dst, Operand src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, xmm0, src, kL128, k66, k0F, kW0);
@@ -4198,8 +4151,7 @@ void Assembler::vmovq(XMMRegister dst, Register src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::vmovq(XMMRegister dst, const Operand& src) {
+void Assembler::vmovq(XMMRegister dst, Operand src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, xmm0, src, kL128, k66, k0F, kW1);
@@ -4227,9 +4179,8 @@ void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1,
emit_sse_operand(dst, src2);
}
-void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1,
- const Operand& src2, SIMDPrefix pp, LeadingOpcode m,
- VexW w) {
+void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
+ SIMDPrefix pp, LeadingOpcode m, VexW w) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, src1, src2, kLIG, pp, m, w);
@@ -4247,9 +4198,7 @@ void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1,
emit_sse_operand(dst, src2);
}
-
-void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1,
- const Operand& src2) {
+void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, src1, src2, kL128, kNone, k0F, kWIG);
@@ -4267,9 +4216,7 @@ void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1,
emit_sse_operand(dst, src2);
}
-
-void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1,
- const Operand& src2) {
+void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, src1, src2, kL128, k66, k0F, kWIG);
@@ -4286,8 +4233,7 @@ void Assembler::vucomiss(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::vucomiss(XMMRegister dst, const Operand& src) {
+void Assembler::vucomiss(XMMRegister dst, Operand src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, xmm0, src, kLIG, kNone, k0F, kWIG);
@@ -4305,9 +4251,7 @@ void Assembler::vss(byte op, XMMRegister dst, XMMRegister src1,
emit_sse_operand(dst, src2);
}
-
-void Assembler::vss(byte op, XMMRegister dst, XMMRegister src1,
- const Operand& src2) {
+void Assembler::vss(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, src1, src2, kLIG, kF3, k0F, kWIG);
@@ -4324,8 +4268,7 @@ void Assembler::bmi1q(byte op, Register reg, Register vreg, Register rm) {
emit_modrm(reg, rm);
}
-
-void Assembler::bmi1q(byte op, Register reg, Register vreg, const Operand& rm) {
+void Assembler::bmi1q(byte op, Register reg, Register vreg, Operand rm) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
emit_vex_prefix(reg, vreg, rm, kLZ, kNone, k0F38, kW1);
@@ -4342,8 +4285,7 @@ void Assembler::bmi1l(byte op, Register reg, Register vreg, Register rm) {
emit_modrm(reg, rm);
}
-
-void Assembler::bmi1l(byte op, Register reg, Register vreg, const Operand& rm) {
+void Assembler::bmi1l(byte op, Register reg, Register vreg, Operand rm) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
emit_vex_prefix(reg, vreg, rm, kLZ, kNone, k0F38, kW0);
@@ -4362,8 +4304,7 @@ void Assembler::tzcntq(Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::tzcntq(Register dst, const Operand& src) {
+void Assembler::tzcntq(Register dst, Operand src) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -4384,8 +4325,7 @@ void Assembler::tzcntl(Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::tzcntl(Register dst, const Operand& src) {
+void Assembler::tzcntl(Register dst, Operand src) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -4406,8 +4346,7 @@ void Assembler::lzcntq(Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::lzcntq(Register dst, const Operand& src) {
+void Assembler::lzcntq(Register dst, Operand src) {
DCHECK(IsEnabled(LZCNT));
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -4428,8 +4367,7 @@ void Assembler::lzcntl(Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::lzcntl(Register dst, const Operand& src) {
+void Assembler::lzcntl(Register dst, Operand src) {
DCHECK(IsEnabled(LZCNT));
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -4450,8 +4388,7 @@ void Assembler::popcntq(Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::popcntq(Register dst, const Operand& src) {
+void Assembler::popcntq(Register dst, Operand src) {
DCHECK(IsEnabled(POPCNT));
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -4472,8 +4409,7 @@ void Assembler::popcntl(Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::popcntl(Register dst, const Operand& src) {
+void Assembler::popcntl(Register dst, Operand src) {
DCHECK(IsEnabled(POPCNT));
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -4493,9 +4429,8 @@ void Assembler::bmi2q(SIMDPrefix pp, byte op, Register reg, Register vreg,
emit_modrm(reg, rm);
}
-
void Assembler::bmi2q(SIMDPrefix pp, byte op, Register reg, Register vreg,
- const Operand& rm) {
+ Operand rm) {
DCHECK(IsEnabled(BMI2));
EnsureSpace ensure_space(this);
emit_vex_prefix(reg, vreg, rm, kLZ, pp, k0F38, kW1);
@@ -4513,9 +4448,8 @@ void Assembler::bmi2l(SIMDPrefix pp, byte op, Register reg, Register vreg,
emit_modrm(reg, rm);
}
-
void Assembler::bmi2l(SIMDPrefix pp, byte op, Register reg, Register vreg,
- const Operand& rm) {
+ Operand rm) {
DCHECK(IsEnabled(BMI2));
EnsureSpace ensure_space(this);
emit_vex_prefix(reg, vreg, rm, kLZ, pp, k0F38, kW0);
@@ -4535,8 +4469,7 @@ void Assembler::rorxq(Register dst, Register src, byte imm8) {
emit(imm8);
}
-
-void Assembler::rorxq(Register dst, const Operand& src, byte imm8) {
+void Assembler::rorxq(Register dst, Operand src, byte imm8) {
DCHECK(IsEnabled(BMI2));
DCHECK(is_uint8(imm8));
Register vreg = Register::from_code<0>(); // VEX.vvvv unused
@@ -4559,8 +4492,7 @@ void Assembler::rorxl(Register dst, Register src, byte imm8) {
emit(imm8);
}
-
-void Assembler::rorxl(Register dst, const Operand& src, byte imm8) {
+void Assembler::rorxl(Register dst, Operand src, byte imm8) {
DCHECK(IsEnabled(BMI2));
DCHECK(is_uint8(imm8));
Register vreg = Register::from_code<0>(); // VEX.vvvv unused
@@ -4584,7 +4516,7 @@ void Assembler::minps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-void Assembler::minps(XMMRegister dst, const Operand& src) {
+void Assembler::minps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -4600,7 +4532,7 @@ void Assembler::maxps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-void Assembler::maxps(XMMRegister dst, const Operand& src) {
+void Assembler::maxps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -4616,7 +4548,7 @@ void Assembler::rcpps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-void Assembler::rcpps(XMMRegister dst, const Operand& src) {
+void Assembler::rcpps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -4632,7 +4564,7 @@ void Assembler::rsqrtps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-void Assembler::rsqrtps(XMMRegister dst, const Operand& src) {
+void Assembler::rsqrtps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -4648,7 +4580,7 @@ void Assembler::sqrtps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-void Assembler::sqrtps(XMMRegister dst, const Operand& src) {
+void Assembler::sqrtps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -4664,7 +4596,7 @@ void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-void Assembler::cvtdq2ps(XMMRegister dst, const Operand& src) {
+void Assembler::cvtdq2ps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -4688,7 +4620,7 @@ void Assembler::movups(XMMRegister dst, XMMRegister src) {
}
}
-void Assembler::movups(XMMRegister dst, const Operand& src) {
+void Assembler::movups(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -4696,7 +4628,7 @@ void Assembler::movups(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-void Assembler::movups(const Operand& dst, XMMRegister src) {
+void Assembler::movups(Operand dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(src, dst);
emit(0x0F);
@@ -4714,7 +4646,7 @@ void Assembler::sse2_instr(XMMRegister dst, XMMRegister src, byte prefix,
emit_sse_operand(dst, src);
}
-void Assembler::sse2_instr(XMMRegister dst, const Operand& src, byte prefix,
+void Assembler::sse2_instr(XMMRegister dst, Operand src, byte prefix,
byte escape, byte opcode) {
EnsureSpace ensure_space(this);
emit(prefix);
@@ -4736,7 +4668,7 @@ void Assembler::ssse3_instr(XMMRegister dst, XMMRegister src, byte prefix,
emit_sse_operand(dst, src);
}
-void Assembler::ssse3_instr(XMMRegister dst, const Operand& src, byte prefix,
+void Assembler::ssse3_instr(XMMRegister dst, Operand src, byte prefix,
byte escape1, byte escape2, byte opcode) {
DCHECK(IsEnabled(SSSE3));
EnsureSpace ensure_space(this);
@@ -4760,7 +4692,7 @@ void Assembler::sse4_instr(XMMRegister dst, XMMRegister src, byte prefix,
emit_sse_operand(dst, src);
}
-void Assembler::sse4_instr(XMMRegister dst, const Operand& src, byte prefix,
+void Assembler::sse4_instr(XMMRegister dst, Operand src, byte prefix,
byte escape1, byte escape2, byte opcode) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
@@ -4772,7 +4704,7 @@ void Assembler::sse4_instr(XMMRegister dst, const Operand& src, byte prefix,
emit_sse_operand(dst, src);
}
-void Assembler::lddqu(XMMRegister dst, const Operand& src) {
+void Assembler::lddqu(XMMRegister dst, Operand src) {
DCHECK(IsEnabled(SSE3));
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -4822,7 +4754,7 @@ void Assembler::pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
emit(shuffle);
}
-void Assembler::pshufd(XMMRegister dst, const Operand& src, uint8_t shuffle) {
+void Assembler::pshufd(XMMRegister dst, Operand src, uint8_t shuffle) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -4832,13 +4764,12 @@ void Assembler::pshufd(XMMRegister dst, const Operand& src, uint8_t shuffle) {
emit(shuffle);
}
-void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
+void Assembler::emit_sse_operand(XMMRegister reg, Operand adr) {
Register ireg = Register::from_code(reg.code());
emit_operand(ireg, adr);
}
-
-void Assembler::emit_sse_operand(Register reg, const Operand& adr) {
+void Assembler::emit_sse_operand(Register reg, Operand adr) {
emit_operand(reg, adr);
}
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 1c838b964b..a532729d15 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -81,9 +81,6 @@ namespace internal {
V(r14) \
V(r15)
-// The length of pushq(rbp), movp(rbp, rsp), Push(rsi) and Push(rdi).
-constexpr int kNoCodeAgeSequenceLength = kPointerSize == kInt64Size ? 6 : 17;
-
enum RegisterCode {
#define REGISTER_CODE(R) kRegCode_##R,
GENERAL_REGISTERS(REGISTER_CODE)
@@ -316,7 +313,7 @@ class Immediate BASE_EMBEDDED {
private:
int32_t value_;
- RelocInfo::Mode rmode_ = RelocInfo::NONE32;
+ RelocInfo::Mode rmode_ = RelocInfo::NONE;
friend class Assembler;
};
@@ -325,7 +322,7 @@ class Immediate BASE_EMBEDDED {
// -----------------------------------------------------------------------------
// Machine instruction Operands
-enum ScaleFactor {
+enum ScaleFactor : int8_t {
times_1 = 0,
times_2 = 1,
times_4 = 2,
@@ -334,9 +331,15 @@ enum ScaleFactor {
times_pointer_size = (kPointerSize == 8) ? times_8 : times_4
};
-
-class Operand BASE_EMBEDDED {
+class Operand {
public:
+ struct Data {
+ byte rex = 0;
+ byte buf[9];
+ byte len = 1; // number of bytes of buf_ in use.
+ int8_t addend; // for rip + offset + addend.
+ };
+
// [base + disp/r]
Operand(Register base, int32_t disp);
@@ -354,10 +357,12 @@ class Operand BASE_EMBEDDED {
// Offset from existing memory operand.
// Offset is added to existing displacement as 32-bit signed values and
// this must not overflow.
- Operand(const Operand& base, int32_t offset);
+ Operand(Operand base, int32_t offset);
// [rip + disp/r]
- explicit Operand(Label* label);
+ explicit Operand(Label* label, int addend = 0);
+
+ Operand(const Operand&) = default;
// Checks whether either base or index register is the given register.
// Does not check the "reg" part of the Operand.
@@ -365,33 +370,29 @@ class Operand BASE_EMBEDDED {
// Queries related to the size of the generated instruction.
// Whether the generated instruction will have a REX prefix.
- bool requires_rex() const { return rex_ != 0; }
+ bool requires_rex() const { return data_.rex != 0; }
// Size of the ModR/M, SIB and displacement parts of the generated
// instruction.
- int operand_size() const { return len_; }
-
- private:
- byte rex_;
- byte buf_[9];
- // The number of bytes of buf_ in use.
- byte len_;
-
- // Set the ModR/M byte without an encoded 'reg' register. The
- // register is encoded later as part of the emit_operand operation.
- // set_modrm can be called before or after set_sib and set_disp*.
- inline void set_modrm(int mod, Register rm);
-
- // Set the SIB byte if one is needed. Sets the length to 2 rather than 1.
- inline void set_sib(ScaleFactor scale, Register index, Register base);
+ int operand_size() const { return data_.len; }
- // Adds operand displacement fields (offsets added to the memory address).
- // Needs to be called after set_sib, not before it.
- inline void set_disp8(int disp);
- inline void set_disp32(int disp);
- inline void set_disp64(int64_t disp); // for labels.
+ const Data& data() const { return data_; }
- friend class Assembler;
+ private:
+ const Data data_;
};
+static_assert(sizeof(Operand) <= 2 * kPointerSize,
+ "Operand must be small enough to pass it by value");
+// Unfortunately, MSVC 2015 is broken in that both is_trivially_destructible and
+// is_trivially_copy_constructible are true, but is_trivially_copyable is false.
+// (status at 2018-02-26, observed on the msvc waterfall bot).
+#if V8_CC_MSVC
+static_assert(std::is_trivially_copy_constructible<Operand>::value &&
+ std::is_trivially_destructible<Operand>::value,
+ "Operand must be trivially copyable to pass it by value");
+#else
+static_assert(IS_TRIVIALLY_COPYABLE(Operand),
+ "Operand must be trivially copyable to pass it by value");
+#endif
#define ASSEMBLER_INSTRUCTION_LIST(V) \
V(add) \
@@ -476,7 +477,7 @@ class Assembler : public AssemblerBase {
// The isolate argument is unused (and may be nullptr) when skipping flushing.
static inline Address target_address_at(Address pc, Address constant_pool);
static inline void set_target_address_at(
- Isolate* isolate, Address pc, Address constant_pool, Address target,
+ Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address
@@ -486,23 +487,13 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the instruction on x64).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Isolate* isolate, Address instruction_payload, Code* code,
- Address target);
+ Address instruction_payload, Code* code, Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target,
+ Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
- static inline RelocInfo::Mode RelocInfoNone() {
- if (kPointerSize == kInt64Size) {
- return RelocInfo::NONE64;
- } else {
- DCHECK_EQ(kPointerSize, kInt32Size);
- return RelocInfo::NONE32;
- }
- }
-
inline Handle<Code> code_target_object_handle_at(Address pc);
inline Address runtime_entry_at(Address pc);
// Number of bytes taken up by the branch target in the code.
@@ -629,29 +620,29 @@ class Assembler : public AssemblerBase {
// 32 bit value, the normal push will optimize the 8 bit case.
void pushq_imm32(int32_t imm32);
void pushq(Register src);
- void pushq(const Operand& src);
+ void pushq(Operand src);
void popq(Register dst);
- void popq(const Operand& dst);
+ void popq(Operand dst);
void enter(Immediate size);
void leave();
// Moves
- void movb(Register dst, const Operand& src);
+ void movb(Register dst, Operand src);
void movb(Register dst, Immediate imm);
- void movb(const Operand& dst, Register src);
- void movb(const Operand& dst, Immediate imm);
+ void movb(Operand dst, Register src);
+ void movb(Operand dst, Immediate imm);
// Move the low 16 bits of a 64-bit register value to a 16-bit
// memory location.
- void movw(Register dst, const Operand& src);
- void movw(const Operand& dst, Register src);
- void movw(const Operand& dst, Immediate imm);
+ void movw(Register dst, Operand src);
+ void movw(Operand dst, Register src);
+ void movw(Operand dst, Immediate imm);
// Move the offset of the label location relative to the current
// position (after the move) to the destination.
- void movl(const Operand& dst, Label* src);
+ void movl(Operand dst, Label* src);
// Loads a pointer into a register with a relocation mode.
void movp(Register dst, void* ptr, RelocInfo::Mode rmode);
@@ -667,20 +658,20 @@ class Assembler : public AssemblerBase {
// Loads a 64-bit immediate into a register.
void movq(Register dst, int64_t value,
- RelocInfo::Mode rmode = RelocInfo::NONE64);
+ RelocInfo::Mode rmode = RelocInfo::NONE);
void movq(Register dst, uint64_t value,
- RelocInfo::Mode rmode = RelocInfo::NONE64);
+ RelocInfo::Mode rmode = RelocInfo::NONE);
void movsxbl(Register dst, Register src);
- void movsxbl(Register dst, const Operand& src);
+ void movsxbl(Register dst, Operand src);
void movsxbq(Register dst, Register src);
- void movsxbq(Register dst, const Operand& src);
+ void movsxbq(Register dst, Operand src);
void movsxwl(Register dst, Register src);
- void movsxwl(Register dst, const Operand& src);
+ void movsxwl(Register dst, Operand src);
void movsxwq(Register dst, Register src);
- void movsxwq(Register dst, const Operand& src);
+ void movsxwq(Register dst, Operand src);
void movsxlq(Register dst, Register src);
- void movsxlq(Register dst, const Operand& src);
+ void movsxlq(Register dst, Operand src);
// Repeated moves.
@@ -696,9 +687,9 @@ class Assembler : public AssemblerBase {
// Conditional moves.
void cmovq(Condition cc, Register dst, Register src);
- void cmovq(Condition cc, Register dst, const Operand& src);
+ void cmovq(Condition cc, Register dst, Operand src);
void cmovl(Condition cc, Register dst, Register src);
- void cmovl(Condition cc, Register dst, const Operand& src);
+ void cmovl(Condition cc, Register dst, Operand src);
void cmpb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x7, dst, src);
@@ -710,19 +701,15 @@ class Assembler : public AssemblerBase {
arithmetic_op_8(0x3A, dst, src);
}
- void cmpb(Register dst, const Operand& src) {
- arithmetic_op_8(0x3A, dst, src);
- }
+ void cmpb(Register dst, Operand src) { arithmetic_op_8(0x3A, dst, src); }
- void cmpb(const Operand& dst, Register src) {
- arithmetic_op_8(0x38, src, dst);
- }
+ void cmpb(Operand dst, Register src) { arithmetic_op_8(0x38, src, dst); }
- void cmpb(const Operand& dst, Immediate src) {
+ void cmpb(Operand dst, Immediate src) {
immediate_arithmetic_op_8(0x7, dst, src);
}
- void cmpw(const Operand& dst, Immediate src) {
+ void cmpw(Operand dst, Immediate src) {
immediate_arithmetic_op_16(0x7, dst, src);
}
@@ -730,37 +717,33 @@ class Assembler : public AssemblerBase {
immediate_arithmetic_op_16(0x7, dst, src);
}
- void cmpw(Register dst, const Operand& src) {
- arithmetic_op_16(0x3B, dst, src);
- }
+ void cmpw(Register dst, Operand src) { arithmetic_op_16(0x3B, dst, src); }
void cmpw(Register dst, Register src) {
arithmetic_op_16(0x3B, dst, src);
}
- void cmpw(const Operand& dst, Register src) {
- arithmetic_op_16(0x39, src, dst);
- }
+ void cmpw(Operand dst, Register src) { arithmetic_op_16(0x39, src, dst); }
- void testb(Register reg, const Operand& op) { testb(op, reg); }
+ void testb(Register reg, Operand op) { testb(op, reg); }
- void testw(Register reg, const Operand& op) { testw(op, reg); }
+ void testw(Register reg, Operand op) { testw(op, reg); }
void andb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x4, dst, src);
}
void decb(Register dst);
- void decb(const Operand& dst);
+ void decb(Operand dst);
// Lock prefix.
void lock();
- void xchgb(Register reg, const Operand& op);
- void xchgw(Register reg, const Operand& op);
+ void xchgb(Register reg, Operand op);
+ void xchgw(Register reg, Operand op);
- void cmpxchgb(const Operand& dst, Register src);
- void cmpxchgw(const Operand& dst, Register src);
+ void cmpxchgb(Operand dst, Register src);
+ void cmpxchgw(Operand dst, Register src);
// Sign-extends rax into rdx:rax.
void cqo();
@@ -769,7 +752,7 @@ class Assembler : public AssemblerBase {
// Multiply eax by src, put the result in edx:eax.
void mull(Register src);
- void mull(const Operand& src);
+ void mull(Operand src);
// Multiply rax by src, put the result in rdx:rax.
void mulq(Register src);
@@ -825,27 +808,29 @@ class Assembler : public AssemblerBase {
immediate_arithmetic_op_8(0x5, dst, src);
}
+ void sub_sp_32(uint32_t imm);
+
void testb(Register dst, Register src);
void testb(Register reg, Immediate mask);
- void testb(const Operand& op, Immediate mask);
- void testb(const Operand& op, Register reg);
+ void testb(Operand op, Immediate mask);
+ void testb(Operand op, Register reg);
void testw(Register dst, Register src);
void testw(Register reg, Immediate mask);
- void testw(const Operand& op, Immediate mask);
- void testw(const Operand& op, Register reg);
+ void testw(Operand op, Immediate mask);
+ void testw(Operand op, Register reg);
// Bit operations.
- void bt(const Operand& dst, Register src);
- void bts(const Operand& dst, Register src);
+ void bt(Operand dst, Register src);
+ void bts(Operand dst, Register src);
void bsrq(Register dst, Register src);
- void bsrq(Register dst, const Operand& src);
+ void bsrq(Register dst, Operand src);
void bsrl(Register dst, Register src);
- void bsrl(Register dst, const Operand& src);
+ void bsrl(Register dst, Operand src);
void bsfq(Register dst, Register src);
- void bsfq(Register dst, const Operand& src);
+ void bsfq(Register dst, Operand src);
void bsfl(Register dst, Register src);
- void bsfl(Register dst, const Operand& src);
+ void bsfl(Register dst, Operand src);
// Miscellaneous
void clc();
@@ -859,7 +844,7 @@ class Assembler : public AssemblerBase {
void setcc(Condition cc, Register reg);
void pshufw(XMMRegister dst, XMMRegister src, uint8_t shuffle);
- void pshufw(XMMRegister dst, const Operand& src, uint8_t shuffle);
+ void pshufw(XMMRegister dst, Operand src, uint8_t shuffle);
// Label operations & relative jumps (PPUM Appendix D)
//
@@ -906,7 +891,7 @@ class Assembler : public AssemblerBase {
// Jump near absolute indirect (r64)
void jmp(Register adr);
- void jmp(const Operand& src);
+ void jmp(Operand src);
// Conditional jumps
void j(Condition cc,
@@ -923,23 +908,23 @@ class Assembler : public AssemblerBase {
void fldpi();
void fldln2();
- void fld_s(const Operand& adr);
- void fld_d(const Operand& adr);
+ void fld_s(Operand adr);
+ void fld_d(Operand adr);
- void fstp_s(const Operand& adr);
- void fstp_d(const Operand& adr);
+ void fstp_s(Operand adr);
+ void fstp_d(Operand adr);
void fstp(int index);
- void fild_s(const Operand& adr);
- void fild_d(const Operand& adr);
+ void fild_s(Operand adr);
+ void fild_d(Operand adr);
- void fist_s(const Operand& adr);
+ void fist_s(Operand adr);
- void fistp_s(const Operand& adr);
- void fistp_d(const Operand& adr);
+ void fistp_s(Operand adr);
+ void fistp_d(Operand adr);
- void fisttp_s(const Operand& adr);
- void fisttp_d(const Operand& adr);
+ void fisttp_s(Operand adr);
+ void fisttp_d(Operand adr);
void fabs();
void fchs();
@@ -949,7 +934,7 @@ class Assembler : public AssemblerBase {
void fmul(int i);
void fdiv(int i);
- void fisub_s(const Operand& adr);
+ void fisub_s(Operand adr);
void faddp(int i = 1);
void fsubp(int i = 1);
@@ -988,24 +973,24 @@ class Assembler : public AssemblerBase {
// SSE instructions
void addss(XMMRegister dst, XMMRegister src);
- void addss(XMMRegister dst, const Operand& src);
+ void addss(XMMRegister dst, Operand src);
void subss(XMMRegister dst, XMMRegister src);
- void subss(XMMRegister dst, const Operand& src);
+ void subss(XMMRegister dst, Operand src);
void mulss(XMMRegister dst, XMMRegister src);
- void mulss(XMMRegister dst, const Operand& src);
+ void mulss(XMMRegister dst, Operand src);
void divss(XMMRegister dst, XMMRegister src);
- void divss(XMMRegister dst, const Operand& src);
+ void divss(XMMRegister dst, Operand src);
void maxss(XMMRegister dst, XMMRegister src);
- void maxss(XMMRegister dst, const Operand& src);
+ void maxss(XMMRegister dst, Operand src);
void minss(XMMRegister dst, XMMRegister src);
- void minss(XMMRegister dst, const Operand& src);
+ void minss(XMMRegister dst, Operand src);
void sqrtss(XMMRegister dst, XMMRegister src);
- void sqrtss(XMMRegister dst, const Operand& src);
+ void sqrtss(XMMRegister dst, Operand src);
void ucomiss(XMMRegister dst, XMMRegister src);
- void ucomiss(XMMRegister dst, const Operand& src);
+ void ucomiss(XMMRegister dst, Operand src);
void movaps(XMMRegister dst, XMMRegister src);
// Don't use this unless it's important to keep the
@@ -1014,48 +999,48 @@ class Assembler : public AssemblerBase {
// values in xmm registers.
void movss(XMMRegister dst, XMMRegister src);
- void movss(XMMRegister dst, const Operand& src);
- void movss(const Operand& dst, XMMRegister src);
+ void movss(XMMRegister dst, Operand src);
+ void movss(Operand dst, XMMRegister src);
void shufps(XMMRegister dst, XMMRegister src, byte imm8);
- void cvttss2si(Register dst, const Operand& src);
+ void cvttss2si(Register dst, Operand src);
void cvttss2si(Register dst, XMMRegister src);
- void cvtlsi2ss(XMMRegister dst, const Operand& src);
+ void cvtlsi2ss(XMMRegister dst, Operand src);
void cvtlsi2ss(XMMRegister dst, Register src);
void andps(XMMRegister dst, XMMRegister src);
- void andps(XMMRegister dst, const Operand& src);
+ void andps(XMMRegister dst, Operand src);
void orps(XMMRegister dst, XMMRegister src);
- void orps(XMMRegister dst, const Operand& src);
+ void orps(XMMRegister dst, Operand src);
void xorps(XMMRegister dst, XMMRegister src);
- void xorps(XMMRegister dst, const Operand& src);
+ void xorps(XMMRegister dst, Operand src);
void addps(XMMRegister dst, XMMRegister src);
- void addps(XMMRegister dst, const Operand& src);
+ void addps(XMMRegister dst, Operand src);
void subps(XMMRegister dst, XMMRegister src);
- void subps(XMMRegister dst, const Operand& src);
+ void subps(XMMRegister dst, Operand src);
void mulps(XMMRegister dst, XMMRegister src);
- void mulps(XMMRegister dst, const Operand& src);
+ void mulps(XMMRegister dst, Operand src);
void divps(XMMRegister dst, XMMRegister src);
- void divps(XMMRegister dst, const Operand& src);
+ void divps(XMMRegister dst, Operand src);
void movmskps(Register dst, XMMRegister src);
void vinstr(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
SIMDPrefix pp, LeadingOpcode m, VexW w);
- void vinstr(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2,
+ void vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
SIMDPrefix pp, LeadingOpcode m, VexW w);
// SSE2 instructions
void sse2_instr(XMMRegister dst, XMMRegister src, byte prefix, byte escape,
byte opcode);
- void sse2_instr(XMMRegister dst, const Operand& src, byte prefix, byte escape,
+ void sse2_instr(XMMRegister dst, Operand src, byte prefix, byte escape,
byte opcode);
#define DECLARE_SSE2_INSTRUCTION(instruction, prefix, escape, opcode) \
void instruction(XMMRegister dst, XMMRegister src) { \
sse2_instr(dst, src, 0x##prefix, 0x##escape, 0x##opcode); \
} \
- void instruction(XMMRegister dst, const Operand& src) { \
+ void instruction(XMMRegister dst, Operand src) { \
sse2_instr(dst, src, 0x##prefix, 0x##escape, 0x##opcode); \
}
@@ -1066,8 +1051,7 @@ class Assembler : public AssemblerBase {
void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0); \
} \
- void v##instruction(XMMRegister dst, XMMRegister src1, \
- const Operand& src2) { \
+ void v##instruction(XMMRegister dst, XMMRegister src1, Operand src2) { \
vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0); \
}
@@ -1075,20 +1059,20 @@ class Assembler : public AssemblerBase {
#undef DECLARE_SSE2_AVX_INSTRUCTION
// SSE3
- void lddqu(XMMRegister dst, const Operand& src);
+ void lddqu(XMMRegister dst, Operand src);
// SSSE3
void ssse3_instr(XMMRegister dst, XMMRegister src, byte prefix, byte escape1,
byte escape2, byte opcode);
- void ssse3_instr(XMMRegister dst, const Operand& src, byte prefix,
- byte escape1, byte escape2, byte opcode);
+ void ssse3_instr(XMMRegister dst, Operand src, byte prefix, byte escape1,
+ byte escape2, byte opcode);
#define DECLARE_SSSE3_INSTRUCTION(instruction, prefix, escape1, escape2, \
opcode) \
void instruction(XMMRegister dst, XMMRegister src) { \
ssse3_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
} \
- void instruction(XMMRegister dst, const Operand& src) { \
+ void instruction(XMMRegister dst, Operand src) { \
ssse3_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
}
@@ -1098,14 +1082,14 @@ class Assembler : public AssemblerBase {
// SSE4
void sse4_instr(XMMRegister dst, XMMRegister src, byte prefix, byte escape1,
byte escape2, byte opcode);
- void sse4_instr(XMMRegister dst, const Operand& src, byte prefix,
- byte escape1, byte escape2, byte opcode);
+ void sse4_instr(XMMRegister dst, Operand src, byte prefix, byte escape1,
+ byte escape2, byte opcode);
#define DECLARE_SSE4_INSTRUCTION(instruction, prefix, escape1, escape2, \
opcode) \
void instruction(XMMRegister dst, XMMRegister src) { \
sse4_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
} \
- void instruction(XMMRegister dst, const Operand& src) { \
+ void instruction(XMMRegister dst, Operand src) { \
sse4_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
}
@@ -1117,8 +1101,7 @@ class Assembler : public AssemblerBase {
void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape1##escape2, kW0); \
} \
- void v##instruction(XMMRegister dst, XMMRegister src1, \
- const Operand& src2) { \
+ void v##instruction(XMMRegister dst, XMMRegister src1, Operand src2) { \
vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape1##escape2, kW0); \
}
@@ -1127,7 +1110,7 @@ class Assembler : public AssemblerBase {
#undef DECLARE_SSE34_AVX_INSTRUCTION
void movd(XMMRegister dst, Register src);
- void movd(XMMRegister dst, const Operand& src);
+ void movd(XMMRegister dst, Operand src);
void movd(Register dst, XMMRegister src);
void movq(XMMRegister dst, Register src);
void movq(Register dst, XMMRegister src);
@@ -1139,18 +1122,18 @@ class Assembler : public AssemblerBase {
// values in xmm registers.
void movsd(XMMRegister dst, XMMRegister src);
- void movsd(const Operand& dst, XMMRegister src);
- void movsd(XMMRegister dst, const Operand& src);
+ void movsd(Operand dst, XMMRegister src);
+ void movsd(XMMRegister dst, Operand src);
- void movdqa(const Operand& dst, XMMRegister src);
- void movdqa(XMMRegister dst, const Operand& src);
+ void movdqa(Operand dst, XMMRegister src);
+ void movdqa(XMMRegister dst, Operand src);
- void movdqu(const Operand& dst, XMMRegister src);
- void movdqu(XMMRegister dst, const Operand& src);
+ void movdqu(Operand dst, XMMRegister src);
+ void movdqu(XMMRegister dst, Operand src);
void movapd(XMMRegister dst, XMMRegister src);
- void movupd(XMMRegister dst, const Operand& src);
- void movupd(const Operand& dst, XMMRegister src);
+ void movupd(XMMRegister dst, Operand src);
+ void movupd(Operand dst, XMMRegister src);
void psllq(XMMRegister reg, byte imm8);
void psrlq(XMMRegister reg, byte imm8);
@@ -1161,98 +1144,96 @@ class Assembler : public AssemblerBase {
void psraw(XMMRegister reg, byte imm8);
void psrad(XMMRegister reg, byte imm8);
- void cvttsd2si(Register dst, const Operand& src);
+ void cvttsd2si(Register dst, Operand src);
void cvttsd2si(Register dst, XMMRegister src);
void cvttss2siq(Register dst, XMMRegister src);
- void cvttss2siq(Register dst, const Operand& src);
+ void cvttss2siq(Register dst, Operand src);
void cvttsd2siq(Register dst, XMMRegister src);
- void cvttsd2siq(Register dst, const Operand& src);
+ void cvttsd2siq(Register dst, Operand src);
- void cvtlsi2sd(XMMRegister dst, const Operand& src);
+ void cvtlsi2sd(XMMRegister dst, Operand src);
void cvtlsi2sd(XMMRegister dst, Register src);
- void cvtqsi2ss(XMMRegister dst, const Operand& src);
+ void cvtqsi2ss(XMMRegister dst, Operand src);
void cvtqsi2ss(XMMRegister dst, Register src);
- void cvtqsi2sd(XMMRegister dst, const Operand& src);
+ void cvtqsi2sd(XMMRegister dst, Operand src);
void cvtqsi2sd(XMMRegister dst, Register src);
void cvtss2sd(XMMRegister dst, XMMRegister src);
- void cvtss2sd(XMMRegister dst, const Operand& src);
+ void cvtss2sd(XMMRegister dst, Operand src);
void cvtsd2ss(XMMRegister dst, XMMRegister src);
- void cvtsd2ss(XMMRegister dst, const Operand& src);
+ void cvtsd2ss(XMMRegister dst, Operand src);
void cvtsd2si(Register dst, XMMRegister src);
void cvtsd2siq(Register dst, XMMRegister src);
void addsd(XMMRegister dst, XMMRegister src);
- void addsd(XMMRegister dst, const Operand& src);
+ void addsd(XMMRegister dst, Operand src);
void subsd(XMMRegister dst, XMMRegister src);
- void subsd(XMMRegister dst, const Operand& src);
+ void subsd(XMMRegister dst, Operand src);
void mulsd(XMMRegister dst, XMMRegister src);
- void mulsd(XMMRegister dst, const Operand& src);
+ void mulsd(XMMRegister dst, Operand src);
void divsd(XMMRegister dst, XMMRegister src);
- void divsd(XMMRegister dst, const Operand& src);
+ void divsd(XMMRegister dst, Operand src);
void maxsd(XMMRegister dst, XMMRegister src);
- void maxsd(XMMRegister dst, const Operand& src);
+ void maxsd(XMMRegister dst, Operand src);
void minsd(XMMRegister dst, XMMRegister src);
- void minsd(XMMRegister dst, const Operand& src);
+ void minsd(XMMRegister dst, Operand src);
void andpd(XMMRegister dst, XMMRegister src);
- void andpd(XMMRegister dst, const Operand& src);
+ void andpd(XMMRegister dst, Operand src);
void orpd(XMMRegister dst, XMMRegister src);
- void orpd(XMMRegister dst, const Operand& src);
+ void orpd(XMMRegister dst, Operand src);
void xorpd(XMMRegister dst, XMMRegister src);
- void xorpd(XMMRegister dst, const Operand& src);
+ void xorpd(XMMRegister dst, Operand src);
void sqrtsd(XMMRegister dst, XMMRegister src);
- void sqrtsd(XMMRegister dst, const Operand& src);
+ void sqrtsd(XMMRegister dst, Operand src);
void haddps(XMMRegister dst, XMMRegister src);
- void haddps(XMMRegister dst, const Operand& src);
+ void haddps(XMMRegister dst, Operand src);
void ucomisd(XMMRegister dst, XMMRegister src);
- void ucomisd(XMMRegister dst, const Operand& src);
+ void ucomisd(XMMRegister dst, Operand src);
void cmpltsd(XMMRegister dst, XMMRegister src);
void movmskpd(Register dst, XMMRegister src);
void punpckldq(XMMRegister dst, XMMRegister src);
- void punpckldq(XMMRegister dst, const Operand& src);
+ void punpckldq(XMMRegister dst, Operand src);
void punpckhdq(XMMRegister dst, XMMRegister src);
// SSE 4.1 instruction
void insertps(XMMRegister dst, XMMRegister src, byte imm8);
void extractps(Register dst, XMMRegister src, byte imm8);
void pextrb(Register dst, XMMRegister src, int8_t imm8);
- void pextrb(const Operand& dst, XMMRegister src, int8_t imm8);
+ void pextrb(Operand dst, XMMRegister src, int8_t imm8);
void pextrw(Register dst, XMMRegister src, int8_t imm8);
- void pextrw(const Operand& dst, XMMRegister src, int8_t imm8);
+ void pextrw(Operand dst, XMMRegister src, int8_t imm8);
void pextrd(Register dst, XMMRegister src, int8_t imm8);
- void pextrd(const Operand& dst, XMMRegister src, int8_t imm8);
+ void pextrd(Operand dst, XMMRegister src, int8_t imm8);
void pinsrb(XMMRegister dst, Register src, int8_t imm8);
- void pinsrb(XMMRegister dst, const Operand& src, int8_t imm8);
+ void pinsrb(XMMRegister dst, Operand src, int8_t imm8);
void pinsrw(XMMRegister dst, Register src, int8_t imm8);
- void pinsrw(XMMRegister dst, const Operand& src, int8_t imm8);
+ void pinsrw(XMMRegister dst, Operand src, int8_t imm8);
void pinsrd(XMMRegister dst, Register src, int8_t imm8);
- void pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);
+ void pinsrd(XMMRegister dst, Operand src, int8_t imm8);
void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
void cmpps(XMMRegister dst, XMMRegister src, int8_t cmp);
- void cmpps(XMMRegister dst, const Operand& src, int8_t cmp);
+ void cmpps(XMMRegister dst, Operand src, int8_t cmp);
void cmppd(XMMRegister dst, XMMRegister src, int8_t cmp);
- void cmppd(XMMRegister dst, const Operand& src, int8_t cmp);
+ void cmppd(XMMRegister dst, Operand src, int8_t cmp);
#define SSE_CMP_P(instr, imm8) \
void instr##ps(XMMRegister dst, XMMRegister src) { cmpps(dst, src, imm8); } \
- void instr##ps(XMMRegister dst, const Operand& src) { \
- cmpps(dst, src, imm8); \
- } \
+ void instr##ps(XMMRegister dst, Operand src) { cmpps(dst, src, imm8); } \
void instr##pd(XMMRegister dst, XMMRegister src) { cmppd(dst, src, imm8); } \
- void instr##pd(XMMRegister dst, const Operand& src) { cmppd(dst, src, imm8); }
+ void instr##pd(XMMRegister dst, Operand src) { cmppd(dst, src, imm8); }
SSE_CMP_P(cmpeq, 0x0);
SSE_CMP_P(cmplt, 0x1);
@@ -1264,25 +1245,25 @@ class Assembler : public AssemblerBase {
#undef SSE_CMP_P
void minps(XMMRegister dst, XMMRegister src);
- void minps(XMMRegister dst, const Operand& src);
+ void minps(XMMRegister dst, Operand src);
void maxps(XMMRegister dst, XMMRegister src);
- void maxps(XMMRegister dst, const Operand& src);
+ void maxps(XMMRegister dst, Operand src);
void rcpps(XMMRegister dst, XMMRegister src);
- void rcpps(XMMRegister dst, const Operand& src);
+ void rcpps(XMMRegister dst, Operand src);
void rsqrtps(XMMRegister dst, XMMRegister src);
- void rsqrtps(XMMRegister dst, const Operand& src);
+ void rsqrtps(XMMRegister dst, Operand src);
void sqrtps(XMMRegister dst, XMMRegister src);
- void sqrtps(XMMRegister dst, const Operand& src);
+ void sqrtps(XMMRegister dst, Operand src);
void movups(XMMRegister dst, XMMRegister src);
- void movups(XMMRegister dst, const Operand& src);
- void movups(const Operand& dst, XMMRegister src);
+ void movups(XMMRegister dst, Operand src);
+ void movups(Operand dst, XMMRegister src);
void psrldq(XMMRegister dst, uint8_t shift);
void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle);
- void pshufd(XMMRegister dst, const Operand& src, uint8_t shuffle);
+ void pshufd(XMMRegister dst, Operand src, uint8_t shuffle);
void pshufhw(XMMRegister dst, XMMRegister src, uint8_t shuffle);
void pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle);
void cvtdq2ps(XMMRegister dst, XMMRegister src);
- void cvtdq2ps(XMMRegister dst, const Operand& src);
+ void cvtdq2ps(XMMRegister dst, Operand src);
// AVX instruction
void vfmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1294,13 +1275,13 @@ class Assembler : public AssemblerBase {
void vfmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmasd(0xb9, dst, src1, src2);
}
- void vfmadd132sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmadd132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0x99, dst, src1, src2);
}
- void vfmadd213sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmadd213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xa9, dst, src1, src2);
}
- void vfmadd231sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmadd231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xb9, dst, src1, src2);
}
void vfmsub132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1312,13 +1293,13 @@ class Assembler : public AssemblerBase {
void vfmsub231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmasd(0xbb, dst, src1, src2);
}
- void vfmsub132sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmsub132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0x9b, dst, src1, src2);
}
- void vfmsub213sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmsub213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xab, dst, src1, src2);
}
- void vfmsub231sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmsub231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xbb, dst, src1, src2);
}
void vfnmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1330,13 +1311,13 @@ class Assembler : public AssemblerBase {
void vfnmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmasd(0xbd, dst, src1, src2);
}
- void vfnmadd132sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmadd132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0x9d, dst, src1, src2);
}
- void vfnmadd213sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmadd213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xad, dst, src1, src2);
}
- void vfnmadd231sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmadd231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xbd, dst, src1, src2);
}
void vfnmsub132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1348,17 +1329,17 @@ class Assembler : public AssemblerBase {
void vfnmsub231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmasd(0xbf, dst, src1, src2);
}
- void vfnmsub132sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmsub132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0x9f, dst, src1, src2);
}
- void vfnmsub213sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmsub213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xaf, dst, src1, src2);
}
- void vfnmsub231sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmsub231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xbf, dst, src1, src2);
}
void vfmasd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void vfmasd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+ void vfmasd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
void vfmadd132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmass(0x99, dst, src1, src2);
@@ -1369,13 +1350,13 @@ class Assembler : public AssemblerBase {
void vfmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmass(0xb9, dst, src1, src2);
}
- void vfmadd132ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmadd132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0x99, dst, src1, src2);
}
- void vfmadd213ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmadd213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xa9, dst, src1, src2);
}
- void vfmadd231ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmadd231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xb9, dst, src1, src2);
}
void vfmsub132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1387,13 +1368,13 @@ class Assembler : public AssemblerBase {
void vfmsub231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmass(0xbb, dst, src1, src2);
}
- void vfmsub132ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmsub132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0x9b, dst, src1, src2);
}
- void vfmsub213ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmsub213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xab, dst, src1, src2);
}
- void vfmsub231ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmsub231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xbb, dst, src1, src2);
}
void vfnmadd132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1405,13 +1386,13 @@ class Assembler : public AssemblerBase {
void vfnmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmass(0xbd, dst, src1, src2);
}
- void vfnmadd132ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmadd132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0x9d, dst, src1, src2);
}
- void vfnmadd213ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmadd213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xad, dst, src1, src2);
}
- void vfnmadd231ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmadd231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xbd, dst, src1, src2);
}
void vfnmsub132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1423,34 +1404,30 @@ class Assembler : public AssemblerBase {
void vfnmsub231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmass(0xbf, dst, src1, src2);
}
- void vfnmsub132ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmsub132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0x9f, dst, src1, src2);
}
- void vfnmsub213ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmsub213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xaf, dst, src1, src2);
}
- void vfnmsub231ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmsub231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xbf, dst, src1, src2);
}
void vfmass(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void vfmass(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+ void vfmass(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
void vmovd(XMMRegister dst, Register src);
- void vmovd(XMMRegister dst, const Operand& src);
+ void vmovd(XMMRegister dst, Operand src);
void vmovd(Register dst, XMMRegister src);
void vmovq(XMMRegister dst, Register src);
- void vmovq(XMMRegister dst, const Operand& src);
+ void vmovq(XMMRegister dst, Operand src);
void vmovq(Register dst, XMMRegister src);
void vmovsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vsd(0x10, dst, src1, src2);
}
- void vmovsd(XMMRegister dst, const Operand& src) {
- vsd(0x10, dst, xmm0, src);
- }
- void vmovsd(const Operand& dst, XMMRegister src) {
- vsd(0x11, src, xmm0, dst);
- }
+ void vmovsd(XMMRegister dst, Operand src) { vsd(0x10, dst, xmm0, src); }
+ void vmovsd(Operand dst, XMMRegister src) { vsd(0x11, src, xmm0, dst); }
#define AVX_SP_3(instr, opcode) \
AVX_S_3(instr, opcode) \
@@ -1464,12 +1441,12 @@ class Assembler : public AssemblerBase {
AVX_3(instr##ps, opcode, vps) \
AVX_3(instr##pd, opcode, vpd)
-#define AVX_3(instr, opcode, impl) \
- void instr(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
- impl(opcode, dst, src1, src2); \
- } \
- void instr(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
- impl(opcode, dst, src1, src2); \
+#define AVX_3(instr, opcode, impl) \
+ void instr(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+ impl(opcode, dst, src1, src2); \
+ } \
+ void instr(XMMRegister dst, XMMRegister src1, Operand src2) { \
+ impl(opcode, dst, src1, src2); \
}
AVX_SP_3(vsqrt, 0x51);
@@ -1500,42 +1477,42 @@ class Assembler : public AssemblerBase {
void vcvtss2sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
}
- void vcvtss2sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vcvtss2sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
}
void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, Register src2) {
XMMRegister isrc2 = XMMRegister::from_code(src2.code());
vinstr(0x2a, dst, src1, isrc2, kF2, k0F, kW0);
}
- void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(0x2a, dst, src1, src2, kF2, k0F, kW0);
}
void vcvtlsi2ss(XMMRegister dst, XMMRegister src1, Register src2) {
XMMRegister isrc2 = XMMRegister::from_code(src2.code());
vinstr(0x2a, dst, src1, isrc2, kF3, k0F, kW0);
}
- void vcvtlsi2ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vcvtlsi2ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(0x2a, dst, src1, src2, kF3, k0F, kW0);
}
void vcvtqsi2ss(XMMRegister dst, XMMRegister src1, Register src2) {
XMMRegister isrc2 = XMMRegister::from_code(src2.code());
vinstr(0x2a, dst, src1, isrc2, kF3, k0F, kW1);
}
- void vcvtqsi2ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vcvtqsi2ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(0x2a, dst, src1, src2, kF3, k0F, kW1);
}
void vcvtqsi2sd(XMMRegister dst, XMMRegister src1, Register src2) {
XMMRegister isrc2 = XMMRegister::from_code(src2.code());
vinstr(0x2a, dst, src1, isrc2, kF2, k0F, kW1);
}
- void vcvtqsi2sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vcvtqsi2sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(0x2a, dst, src1, src2, kF2, k0F, kW1);
}
void vcvttss2si(Register dst, XMMRegister src) {
XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW0);
}
- void vcvttss2si(Register dst, const Operand& src) {
+ void vcvttss2si(Register dst, Operand src) {
XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW0);
}
@@ -1543,7 +1520,7 @@ class Assembler : public AssemblerBase {
XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW0);
}
- void vcvttsd2si(Register dst, const Operand& src) {
+ void vcvttsd2si(Register dst, Operand src) {
XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW0);
}
@@ -1551,7 +1528,7 @@ class Assembler : public AssemblerBase {
XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW1);
}
- void vcvttss2siq(Register dst, const Operand& src) {
+ void vcvttss2siq(Register dst, Operand src) {
XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW1);
}
@@ -1559,7 +1536,7 @@ class Assembler : public AssemblerBase {
XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW1);
}
- void vcvttsd2siq(Register dst, const Operand& src) {
+ void vcvttsd2siq(Register dst, Operand src) {
XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW1);
}
@@ -1570,7 +1547,7 @@ class Assembler : public AssemblerBase {
void vucomisd(XMMRegister dst, XMMRegister src) {
vinstr(0x2e, dst, xmm0, src, k66, k0F, kWIG);
}
- void vucomisd(XMMRegister dst, const Operand& src) {
+ void vucomisd(XMMRegister dst, Operand src) {
vinstr(0x2e, dst, xmm0, src, k66, k0F, kWIG);
}
void vroundss(XMMRegister dst, XMMRegister src1, XMMRegister src2,
@@ -1587,39 +1564,27 @@ class Assembler : public AssemblerBase {
void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vinstr(op, dst, src1, src2, kF2, k0F, kWIG);
}
- void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vsd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(op, dst, src1, src2, kF2, k0F, kWIG);
}
void vmovss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vss(0x10, dst, src1, src2);
}
- void vmovss(XMMRegister dst, const Operand& src) {
- vss(0x10, dst, xmm0, src);
- }
- void vmovss(const Operand& dst, XMMRegister src) {
- vss(0x11, src, xmm0, dst);
- }
+ void vmovss(XMMRegister dst, Operand src) { vss(0x10, dst, xmm0, src); }
+ void vmovss(Operand dst, XMMRegister src) { vss(0x11, src, xmm0, dst); }
void vucomiss(XMMRegister dst, XMMRegister src);
- void vucomiss(XMMRegister dst, const Operand& src);
+ void vucomiss(XMMRegister dst, Operand src);
void vss(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void vss(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+ void vss(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
void vmovaps(XMMRegister dst, XMMRegister src) { vps(0x28, dst, xmm0, src); }
void vmovups(XMMRegister dst, XMMRegister src) { vps(0x10, dst, xmm0, src); }
- void vmovups(XMMRegister dst, const Operand& src) {
- vps(0x10, dst, xmm0, src);
- }
- void vmovups(const Operand& dst, XMMRegister src) {
- vps(0x11, src, xmm0, dst);
- }
+ void vmovups(XMMRegister dst, Operand src) { vps(0x10, dst, xmm0, src); }
+ void vmovups(Operand dst, XMMRegister src) { vps(0x11, src, xmm0, dst); }
void vmovapd(XMMRegister dst, XMMRegister src) { vpd(0x28, dst, xmm0, src); }
- void vmovupd(XMMRegister dst, const Operand& src) {
- vpd(0x10, dst, xmm0, src);
- }
- void vmovupd(const Operand& dst, XMMRegister src) {
- vpd(0x11, src, xmm0, dst);
- }
+ void vmovupd(XMMRegister dst, Operand src) { vpd(0x10, dst, xmm0, src); }
+ void vmovupd(Operand dst, XMMRegister src) { vpd(0x11, src, xmm0, dst); }
void vmovmskps(Register dst, XMMRegister src) {
XMMRegister idst = XMMRegister::from_code(dst.code());
vps(0x50, idst, xmm0, src);
@@ -1632,8 +1597,7 @@ class Assembler : public AssemblerBase {
vps(0xC2, dst, src1, src2);
emit(cmp);
}
- void vcmpps(XMMRegister dst, XMMRegister src1, const Operand& src2,
- int8_t cmp) {
+ void vcmpps(XMMRegister dst, XMMRegister src1, Operand src2, int8_t cmp) {
vps(0xC2, dst, src1, src2);
emit(cmp);
}
@@ -1641,24 +1605,23 @@ class Assembler : public AssemblerBase {
vpd(0xC2, dst, src1, src2);
emit(cmp);
}
- void vcmppd(XMMRegister dst, XMMRegister src1, const Operand& src2,
- int8_t cmp) {
+ void vcmppd(XMMRegister dst, XMMRegister src1, Operand src2, int8_t cmp) {
vpd(0xC2, dst, src1, src2);
emit(cmp);
}
-#define AVX_CMP_P(instr, imm8) \
- void instr##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
- vcmpps(dst, src1, src2, imm8); \
- } \
- void instr##ps(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
- vcmpps(dst, src1, src2, imm8); \
- } \
- void instr##pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
- vcmppd(dst, src1, src2, imm8); \
- } \
- void instr##pd(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
- vcmppd(dst, src1, src2, imm8); \
+#define AVX_CMP_P(instr, imm8) \
+ void instr##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+ vcmpps(dst, src1, src2, imm8); \
+ } \
+ void instr##ps(XMMRegister dst, XMMRegister src1, Operand src2) { \
+ vcmpps(dst, src1, src2, imm8); \
+ } \
+ void instr##pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+ vcmppd(dst, src1, src2, imm8); \
+ } \
+ void instr##pd(XMMRegister dst, XMMRegister src1, Operand src2) { \
+ vcmppd(dst, src1, src2, imm8); \
}
AVX_CMP_P(vcmpeq, 0x0);
@@ -1670,7 +1633,7 @@ class Assembler : public AssemblerBase {
#undef AVX_CMP_P
- void vlddqu(XMMRegister dst, const Operand& src) {
+ void vlddqu(XMMRegister dst, Operand src) {
vinstr(0xF0, dst, xmm0, src, kF2, k0F, kWIG);
}
void vpsllw(XMMRegister dst, XMMRegister src, int8_t imm8) {
@@ -1702,7 +1665,7 @@ class Assembler : public AssemblerBase {
vinstr(0x14, src, xmm0, idst, k66, k0F3A, kW0);
emit(imm8);
}
- void vpextrb(const Operand& dst, XMMRegister src, int8_t imm8) {
+ void vpextrb(Operand dst, XMMRegister src, int8_t imm8) {
vinstr(0x14, src, xmm0, dst, k66, k0F3A, kW0);
emit(imm8);
}
@@ -1711,7 +1674,7 @@ class Assembler : public AssemblerBase {
vinstr(0xc5, idst, xmm0, src, k66, k0F, kW0);
emit(imm8);
}
- void vpextrw(const Operand& dst, XMMRegister src, int8_t imm8) {
+ void vpextrw(Operand dst, XMMRegister src, int8_t imm8) {
vinstr(0x15, src, xmm0, dst, k66, k0F3A, kW0);
emit(imm8);
}
@@ -1720,7 +1683,7 @@ class Assembler : public AssemblerBase {
vinstr(0x16, src, xmm0, idst, k66, k0F3A, kW0);
emit(imm8);
}
- void vpextrd(const Operand& dst, XMMRegister src, int8_t imm8) {
+ void vpextrd(Operand dst, XMMRegister src, int8_t imm8) {
vinstr(0x16, src, xmm0, dst, k66, k0F3A, kW0);
emit(imm8);
}
@@ -1729,8 +1692,7 @@ class Assembler : public AssemblerBase {
vinstr(0x20, dst, src1, isrc, k66, k0F3A, kW0);
emit(imm8);
}
- void vpinsrb(XMMRegister dst, XMMRegister src1, const Operand& src2,
- int8_t imm8) {
+ void vpinsrb(XMMRegister dst, XMMRegister src1, Operand src2, int8_t imm8) {
vinstr(0x20, dst, src1, src2, k66, k0F3A, kW0);
emit(imm8);
}
@@ -1739,8 +1701,7 @@ class Assembler : public AssemblerBase {
vinstr(0xc4, dst, src1, isrc, k66, k0F, kW0);
emit(imm8);
}
- void vpinsrw(XMMRegister dst, XMMRegister src1, const Operand& src2,
- int8_t imm8) {
+ void vpinsrw(XMMRegister dst, XMMRegister src1, Operand src2, int8_t imm8) {
vinstr(0xc4, dst, src1, src2, k66, k0F, kW0);
emit(imm8);
}
@@ -1749,8 +1710,7 @@ class Assembler : public AssemblerBase {
vinstr(0x22, dst, src1, isrc, k66, k0F3A, kW0);
emit(imm8);
}
- void vpinsrd(XMMRegister dst, XMMRegister src1, const Operand& src2,
- int8_t imm8) {
+ void vpinsrd(XMMRegister dst, XMMRegister src1, Operand src2, int8_t imm8) {
vinstr(0x22, dst, src1, src2, k66, k0F3A, kW0);
emit(imm8);
}
@@ -1760,150 +1720,150 @@ class Assembler : public AssemblerBase {
}
void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void vps(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+ void vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
void vpd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void vpd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+ void vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
// BMI instruction
void andnq(Register dst, Register src1, Register src2) {
bmi1q(0xf2, dst, src1, src2);
}
- void andnq(Register dst, Register src1, const Operand& src2) {
+ void andnq(Register dst, Register src1, Operand src2) {
bmi1q(0xf2, dst, src1, src2);
}
void andnl(Register dst, Register src1, Register src2) {
bmi1l(0xf2, dst, src1, src2);
}
- void andnl(Register dst, Register src1, const Operand& src2) {
+ void andnl(Register dst, Register src1, Operand src2) {
bmi1l(0xf2, dst, src1, src2);
}
void bextrq(Register dst, Register src1, Register src2) {
bmi1q(0xf7, dst, src2, src1);
}
- void bextrq(Register dst, const Operand& src1, Register src2) {
+ void bextrq(Register dst, Operand src1, Register src2) {
bmi1q(0xf7, dst, src2, src1);
}
void bextrl(Register dst, Register src1, Register src2) {
bmi1l(0xf7, dst, src2, src1);
}
- void bextrl(Register dst, const Operand& src1, Register src2) {
+ void bextrl(Register dst, Operand src1, Register src2) {
bmi1l(0xf7, dst, src2, src1);
}
void blsiq(Register dst, Register src) { bmi1q(0xf3, rbx, dst, src); }
- void blsiq(Register dst, const Operand& src) { bmi1q(0xf3, rbx, dst, src); }
+ void blsiq(Register dst, Operand src) { bmi1q(0xf3, rbx, dst, src); }
void blsil(Register dst, Register src) { bmi1l(0xf3, rbx, dst, src); }
- void blsil(Register dst, const Operand& src) { bmi1l(0xf3, rbx, dst, src); }
+ void blsil(Register dst, Operand src) { bmi1l(0xf3, rbx, dst, src); }
void blsmskq(Register dst, Register src) { bmi1q(0xf3, rdx, dst, src); }
- void blsmskq(Register dst, const Operand& src) { bmi1q(0xf3, rdx, dst, src); }
+ void blsmskq(Register dst, Operand src) { bmi1q(0xf3, rdx, dst, src); }
void blsmskl(Register dst, Register src) { bmi1l(0xf3, rdx, dst, src); }
- void blsmskl(Register dst, const Operand& src) { bmi1l(0xf3, rdx, dst, src); }
+ void blsmskl(Register dst, Operand src) { bmi1l(0xf3, rdx, dst, src); }
void blsrq(Register dst, Register src) { bmi1q(0xf3, rcx, dst, src); }
- void blsrq(Register dst, const Operand& src) { bmi1q(0xf3, rcx, dst, src); }
+ void blsrq(Register dst, Operand src) { bmi1q(0xf3, rcx, dst, src); }
void blsrl(Register dst, Register src) { bmi1l(0xf3, rcx, dst, src); }
- void blsrl(Register dst, const Operand& src) { bmi1l(0xf3, rcx, dst, src); }
+ void blsrl(Register dst, Operand src) { bmi1l(0xf3, rcx, dst, src); }
void tzcntq(Register dst, Register src);
- void tzcntq(Register dst, const Operand& src);
+ void tzcntq(Register dst, Operand src);
void tzcntl(Register dst, Register src);
- void tzcntl(Register dst, const Operand& src);
+ void tzcntl(Register dst, Operand src);
void lzcntq(Register dst, Register src);
- void lzcntq(Register dst, const Operand& src);
+ void lzcntq(Register dst, Operand src);
void lzcntl(Register dst, Register src);
- void lzcntl(Register dst, const Operand& src);
+ void lzcntl(Register dst, Operand src);
void popcntq(Register dst, Register src);
- void popcntq(Register dst, const Operand& src);
+ void popcntq(Register dst, Operand src);
void popcntl(Register dst, Register src);
- void popcntl(Register dst, const Operand& src);
+ void popcntl(Register dst, Operand src);
void bzhiq(Register dst, Register src1, Register src2) {
bmi2q(kNone, 0xf5, dst, src2, src1);
}
- void bzhiq(Register dst, const Operand& src1, Register src2) {
+ void bzhiq(Register dst, Operand src1, Register src2) {
bmi2q(kNone, 0xf5, dst, src2, src1);
}
void bzhil(Register dst, Register src1, Register src2) {
bmi2l(kNone, 0xf5, dst, src2, src1);
}
- void bzhil(Register dst, const Operand& src1, Register src2) {
+ void bzhil(Register dst, Operand src1, Register src2) {
bmi2l(kNone, 0xf5, dst, src2, src1);
}
void mulxq(Register dst1, Register dst2, Register src) {
bmi2q(kF2, 0xf6, dst1, dst2, src);
}
- void mulxq(Register dst1, Register dst2, const Operand& src) {
+ void mulxq(Register dst1, Register dst2, Operand src) {
bmi2q(kF2, 0xf6, dst1, dst2, src);
}
void mulxl(Register dst1, Register dst2, Register src) {
bmi2l(kF2, 0xf6, dst1, dst2, src);
}
- void mulxl(Register dst1, Register dst2, const Operand& src) {
+ void mulxl(Register dst1, Register dst2, Operand src) {
bmi2l(kF2, 0xf6, dst1, dst2, src);
}
void pdepq(Register dst, Register src1, Register src2) {
bmi2q(kF2, 0xf5, dst, src1, src2);
}
- void pdepq(Register dst, Register src1, const Operand& src2) {
+ void pdepq(Register dst, Register src1, Operand src2) {
bmi2q(kF2, 0xf5, dst, src1, src2);
}
void pdepl(Register dst, Register src1, Register src2) {
bmi2l(kF2, 0xf5, dst, src1, src2);
}
- void pdepl(Register dst, Register src1, const Operand& src2) {
+ void pdepl(Register dst, Register src1, Operand src2) {
bmi2l(kF2, 0xf5, dst, src1, src2);
}
void pextq(Register dst, Register src1, Register src2) {
bmi2q(kF3, 0xf5, dst, src1, src2);
}
- void pextq(Register dst, Register src1, const Operand& src2) {
+ void pextq(Register dst, Register src1, Operand src2) {
bmi2q(kF3, 0xf5, dst, src1, src2);
}
void pextl(Register dst, Register src1, Register src2) {
bmi2l(kF3, 0xf5, dst, src1, src2);
}
- void pextl(Register dst, Register src1, const Operand& src2) {
+ void pextl(Register dst, Register src1, Operand src2) {
bmi2l(kF3, 0xf5, dst, src1, src2);
}
void sarxq(Register dst, Register src1, Register src2) {
bmi2q(kF3, 0xf7, dst, src2, src1);
}
- void sarxq(Register dst, const Operand& src1, Register src2) {
+ void sarxq(Register dst, Operand src1, Register src2) {
bmi2q(kF3, 0xf7, dst, src2, src1);
}
void sarxl(Register dst, Register src1, Register src2) {
bmi2l(kF3, 0xf7, dst, src2, src1);
}
- void sarxl(Register dst, const Operand& src1, Register src2) {
+ void sarxl(Register dst, Operand src1, Register src2) {
bmi2l(kF3, 0xf7, dst, src2, src1);
}
void shlxq(Register dst, Register src1, Register src2) {
bmi2q(k66, 0xf7, dst, src2, src1);
}
- void shlxq(Register dst, const Operand& src1, Register src2) {
+ void shlxq(Register dst, Operand src1, Register src2) {
bmi2q(k66, 0xf7, dst, src2, src1);
}
void shlxl(Register dst, Register src1, Register src2) {
bmi2l(k66, 0xf7, dst, src2, src1);
}
- void shlxl(Register dst, const Operand& src1, Register src2) {
+ void shlxl(Register dst, Operand src1, Register src2) {
bmi2l(k66, 0xf7, dst, src2, src1);
}
void shrxq(Register dst, Register src1, Register src2) {
bmi2q(kF2, 0xf7, dst, src2, src1);
}
- void shrxq(Register dst, const Operand& src1, Register src2) {
+ void shrxq(Register dst, Operand src1, Register src2) {
bmi2q(kF2, 0xf7, dst, src2, src1);
}
void shrxl(Register dst, Register src1, Register src2) {
bmi2l(kF2, 0xf7, dst, src2, src1);
}
- void shrxl(Register dst, const Operand& src1, Register src2) {
+ void shrxl(Register dst, Operand src1, Register src2) {
bmi2l(kF2, 0xf7, dst, src2, src1);
}
void rorxq(Register dst, Register src, byte imm8);
- void rorxq(Register dst, const Operand& src, byte imm8);
+ void rorxq(Register dst, Operand src, byte imm8);
void rorxl(Register dst, Register src, byte imm8);
- void rorxl(Register dst, const Operand& src, byte imm8);
+ void rorxl(Register dst, Operand src, byte imm8);
void lfence();
void pause();
@@ -1961,7 +1921,7 @@ class Assembler : public AssemblerBase {
protected:
// Call near indirect
- void call(const Operand& operand);
+ void call(Operand operand);
private:
byte* addr_at(int pos) { return buffer_ + pos; }
@@ -1997,8 +1957,8 @@ class Assembler : public AssemblerBase {
// The high bit of reg is used for REX.R, the high bit of op's base
// register is used for REX.B, and the high bit of op's index register
// is used for REX.X. REX.W is set.
- inline void emit_rex_64(Register reg, const Operand& op);
- inline void emit_rex_64(XMMRegister reg, const Operand& op);
+ inline void emit_rex_64(Register reg, Operand op);
+ inline void emit_rex_64(XMMRegister reg, Operand op);
// Emits a REX prefix that encodes a 64-bit operand size and
// the top bit of the register code.
@@ -2011,7 +1971,7 @@ class Assembler : public AssemblerBase {
// The high bit of op's base register is used for REX.B, and the high
// bit of op's index register is used for REX.X.
// REX.W is set and REX.R clear.
- inline void emit_rex_64(const Operand& op);
+ inline void emit_rex_64(Operand op);
// Emit a REX prefix that only sets REX.W to choose a 64-bit operand size.
void emit_rex_64() { emit(0x48); }
@@ -2023,7 +1983,7 @@ class Assembler : public AssemblerBase {
// The high bit of reg is used for REX.R, the high bit of op's base
// register is used for REX.B, and the high bit of op's index register
// is used for REX.X. REX.W is cleared.
- inline void emit_rex_32(Register reg, const Operand& op);
+ inline void emit_rex_32(Register reg, Operand op);
// High bit of rm_reg goes to REX.B.
// REX.W, REX.R and REX.X are clear.
@@ -2031,7 +1991,7 @@ class Assembler : public AssemblerBase {
// High bit of base goes to REX.B and high bit of index to REX.X.
// REX.W and REX.R are clear.
- inline void emit_rex_32(const Operand& op);
+ inline void emit_rex_32(Operand op);
// High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
// REX.W is cleared. If no REX bits are set, no byte is emitted.
@@ -2041,7 +2001,7 @@ class Assembler : public AssemblerBase {
// register is used for REX.B, and the high bit of op's index register
// is used for REX.X. REX.W is cleared. If no REX bits are set, nothing
// is emitted.
- inline void emit_optional_rex_32(Register reg, const Operand& op);
+ inline void emit_optional_rex_32(Register reg, Operand op);
// As for emit_optional_rex_32(Register, Register), except that
// the registers are XMM registers.
@@ -2055,18 +2015,18 @@ class Assembler : public AssemblerBase {
// one of the registers is an XMM registers.
inline void emit_optional_rex_32(Register reg, XMMRegister base);
- // As for emit_optional_rex_32(Register, const Operand&), except that
+ // As for emit_optional_rex_32(Register, Operand), except that
// the register is an XMM register.
- inline void emit_optional_rex_32(XMMRegister reg, const Operand& op);
+ inline void emit_optional_rex_32(XMMRegister reg, Operand op);
// Optionally do as emit_rex_32(Register) if the register number has
// the high bit set.
inline void emit_optional_rex_32(Register rm_reg);
inline void emit_optional_rex_32(XMMRegister rm_reg);
- // Optionally do as emit_rex_32(const Operand&) if the operand register
+ // Optionally do as emit_rex_32(Operand) if the operand register
// numbers have a high bit set.
- inline void emit_optional_rex_32(const Operand& op);
+ inline void emit_optional_rex_32(Operand op);
void emit_rex(int size) {
if (size == kInt64Size) {
@@ -2102,8 +2062,7 @@ class Assembler : public AssemblerBase {
SIMDPrefix pp);
void emit_vex3_byte0() { emit(0xc4); }
inline void emit_vex3_byte1(XMMRegister reg, XMMRegister rm, LeadingOpcode m);
- inline void emit_vex3_byte1(XMMRegister reg, const Operand& rm,
- LeadingOpcode m);
+ inline void emit_vex3_byte1(XMMRegister reg, Operand rm, LeadingOpcode m);
inline void emit_vex3_byte2(VexW w, XMMRegister v, VectorLength l,
SIMDPrefix pp);
inline void emit_vex_prefix(XMMRegister reg, XMMRegister v, XMMRegister rm,
@@ -2112,10 +2071,10 @@ class Assembler : public AssemblerBase {
inline void emit_vex_prefix(Register reg, Register v, Register rm,
VectorLength l, SIMDPrefix pp, LeadingOpcode m,
VexW w);
- inline void emit_vex_prefix(XMMRegister reg, XMMRegister v, const Operand& rm,
+ inline void emit_vex_prefix(XMMRegister reg, XMMRegister v, Operand rm,
VectorLength l, SIMDPrefix pp, LeadingOpcode m,
VexW w);
- inline void emit_vex_prefix(Register reg, Register v, const Operand& rm,
+ inline void emit_vex_prefix(Register reg, Register v, Operand rm,
VectorLength l, SIMDPrefix pp, LeadingOpcode m,
VexW w);
@@ -2123,14 +2082,14 @@ class Assembler : public AssemblerBase {
// 1- or 4-byte offset for a memory operand. Also encodes
// the second operand of the operation, a register or operation
// subcode, into the reg field of the ModR/M byte.
- void emit_operand(Register reg, const Operand& adr) {
+ void emit_operand(Register reg, Operand adr) {
emit_operand(reg.low_bits(), adr);
}
// Emit the ModR/M byte, and optionally the SIB byte and
// 1- or 4-byte offset for a memory operand. Also used to encode
// a three-bit opcode extension into the ModR/M byte.
- void emit_operand(int rm, const Operand& adr);
+ void emit_operand(int rm, Operand adr);
// Emit a ModR/M byte with registers coded in the reg and rm_reg fields.
void emit_modrm(Register reg, Register rm_reg) {
@@ -2149,8 +2108,8 @@ class Assembler : public AssemblerBase {
// The first argument is the reg field, the second argument is the r/m field.
void emit_sse_operand(XMMRegister dst, XMMRegister src);
- void emit_sse_operand(XMMRegister reg, const Operand& adr);
- void emit_sse_operand(Register reg, const Operand& adr);
+ void emit_sse_operand(XMMRegister reg, Operand adr);
+ void emit_sse_operand(Register reg, Operand adr);
void emit_sse_operand(XMMRegister dst, Register src);
void emit_sse_operand(Register dst, XMMRegister src);
void emit_sse_operand(XMMRegister dst);
@@ -2160,37 +2119,28 @@ class Assembler : public AssemblerBase {
// similar, differing just in the opcode or in the reg field of the
// ModR/M byte.
void arithmetic_op_8(byte opcode, Register reg, Register rm_reg);
- void arithmetic_op_8(byte opcode, Register reg, const Operand& rm_reg);
+ void arithmetic_op_8(byte opcode, Register reg, Operand rm_reg);
void arithmetic_op_16(byte opcode, Register reg, Register rm_reg);
- void arithmetic_op_16(byte opcode, Register reg, const Operand& rm_reg);
+ void arithmetic_op_16(byte opcode, Register reg, Operand rm_reg);
// Operate on operands/registers with pointer size, 32-bit or 64-bit size.
void arithmetic_op(byte opcode, Register reg, Register rm_reg, int size);
- void arithmetic_op(byte opcode,
- Register reg,
- const Operand& rm_reg,
- int size);
+ void arithmetic_op(byte opcode, Register reg, Operand rm_reg, int size);
// Operate on a byte in memory or register.
void immediate_arithmetic_op_8(byte subcode,
Register dst,
Immediate src);
- void immediate_arithmetic_op_8(byte subcode,
- const Operand& dst,
- Immediate src);
+ void immediate_arithmetic_op_8(byte subcode, Operand dst, Immediate src);
// Operate on a word in memory or register.
void immediate_arithmetic_op_16(byte subcode,
Register dst,
Immediate src);
- void immediate_arithmetic_op_16(byte subcode,
- const Operand& dst,
- Immediate src);
+ void immediate_arithmetic_op_16(byte subcode, Operand dst, Immediate src);
// Operate on operands/registers with pointer size, 32-bit or 64-bit size.
void immediate_arithmetic_op(byte subcode,
Register dst,
Immediate src,
int size);
- void immediate_arithmetic_op(byte subcode,
- const Operand& dst,
- Immediate src,
+ void immediate_arithmetic_op(byte subcode, Operand dst, Immediate src,
int size);
// Emit machine code for a shift operation.
@@ -2218,15 +2168,15 @@ class Assembler : public AssemblerBase {
immediate_arithmetic_op(0x0, dst, src, size);
}
- void emit_add(Register dst, const Operand& src, int size) {
+ void emit_add(Register dst, Operand src, int size) {
arithmetic_op(0x03, dst, src, size);
}
- void emit_add(const Operand& dst, Register src, int size) {
+ void emit_add(Operand dst, Register src, int size) {
arithmetic_op(0x1, src, dst, size);
}
- void emit_add(const Operand& dst, Immediate src, int size) {
+ void emit_add(Operand dst, Immediate src, int size) {
immediate_arithmetic_op(0x0, dst, src, size);
}
@@ -2234,11 +2184,11 @@ class Assembler : public AssemblerBase {
arithmetic_op(0x23, dst, src, size);
}
- void emit_and(Register dst, const Operand& src, int size) {
+ void emit_and(Register dst, Operand src, int size) {
arithmetic_op(0x23, dst, src, size);
}
- void emit_and(const Operand& dst, Register src, int size) {
+ void emit_and(Operand dst, Register src, int size) {
arithmetic_op(0x21, src, dst, size);
}
@@ -2246,7 +2196,7 @@ class Assembler : public AssemblerBase {
immediate_arithmetic_op(0x4, dst, src, size);
}
- void emit_and(const Operand& dst, Immediate src, int size) {
+ void emit_and(Operand dst, Immediate src, int size) {
immediate_arithmetic_op(0x4, dst, src, size);
}
@@ -2254,11 +2204,11 @@ class Assembler : public AssemblerBase {
arithmetic_op(0x3B, dst, src, size);
}
- void emit_cmp(Register dst, const Operand& src, int size) {
+ void emit_cmp(Register dst, Operand src, int size) {
arithmetic_op(0x3B, dst, src, size);
}
- void emit_cmp(const Operand& dst, Register src, int size) {
+ void emit_cmp(Operand dst, Register src, int size) {
arithmetic_op(0x39, src, dst, size);
}
@@ -2266,17 +2216,17 @@ class Assembler : public AssemblerBase {
immediate_arithmetic_op(0x7, dst, src, size);
}
- void emit_cmp(const Operand& dst, Immediate src, int size) {
+ void emit_cmp(Operand dst, Immediate src, int size) {
immediate_arithmetic_op(0x7, dst, src, size);
}
// Compare {al,ax,eax,rax} with src. If equal, set ZF and write dst into
// src. Otherwise clear ZF and write src into {al,ax,eax,rax}. This
// operation is only atomic if prefixed by the lock instruction.
- void emit_cmpxchg(const Operand& dst, Register src, int size);
+ void emit_cmpxchg(Operand dst, Register src, int size);
void emit_dec(Register dst, int size);
- void emit_dec(const Operand& dst, int size);
+ void emit_dec(Operand dst, int size);
// Divide rdx:rax by src. Quotient in rax, remainder in rdx when size is 64.
// Divide edx:eax by lower 32 bits of src. Quotient in eax, remainder in edx
@@ -2287,43 +2237,43 @@ class Assembler : public AssemblerBase {
// Signed multiply instructions.
// rdx:rax = rax * src when size is 64 or edx:eax = eax * src when size is 32.
void emit_imul(Register src, int size);
- void emit_imul(const Operand& src, int size);
+ void emit_imul(Operand src, int size);
void emit_imul(Register dst, Register src, int size);
- void emit_imul(Register dst, const Operand& src, int size);
+ void emit_imul(Register dst, Operand src, int size);
void emit_imul(Register dst, Register src, Immediate imm, int size);
- void emit_imul(Register dst, const Operand& src, Immediate imm, int size);
+ void emit_imul(Register dst, Operand src, Immediate imm, int size);
void emit_inc(Register dst, int size);
- void emit_inc(const Operand& dst, int size);
+ void emit_inc(Operand dst, int size);
- void emit_lea(Register dst, const Operand& src, int size);
+ void emit_lea(Register dst, Operand src, int size);
- void emit_mov(Register dst, const Operand& src, int size);
+ void emit_mov(Register dst, Operand src, int size);
void emit_mov(Register dst, Register src, int size);
- void emit_mov(const Operand& dst, Register src, int size);
+ void emit_mov(Operand dst, Register src, int size);
void emit_mov(Register dst, Immediate value, int size);
- void emit_mov(const Operand& dst, Immediate value, int size);
+ void emit_mov(Operand dst, Immediate value, int size);
- void emit_movzxb(Register dst, const Operand& src, int size);
+ void emit_movzxb(Register dst, Operand src, int size);
void emit_movzxb(Register dst, Register src, int size);
- void emit_movzxw(Register dst, const Operand& src, int size);
+ void emit_movzxw(Register dst, Operand src, int size);
void emit_movzxw(Register dst, Register src, int size);
void emit_neg(Register dst, int size);
- void emit_neg(const Operand& dst, int size);
+ void emit_neg(Operand dst, int size);
void emit_not(Register dst, int size);
- void emit_not(const Operand& dst, int size);
+ void emit_not(Operand dst, int size);
void emit_or(Register dst, Register src, int size) {
arithmetic_op(0x0B, dst, src, size);
}
- void emit_or(Register dst, const Operand& src, int size) {
+ void emit_or(Register dst, Operand src, int size) {
arithmetic_op(0x0B, dst, src, size);
}
- void emit_or(const Operand& dst, Register src, int size) {
+ void emit_or(Operand dst, Register src, int size) {
arithmetic_op(0x9, src, dst, size);
}
@@ -2331,7 +2281,7 @@ class Assembler : public AssemblerBase {
immediate_arithmetic_op(0x1, dst, src, size);
}
- void emit_or(const Operand& dst, Immediate src, int size) {
+ void emit_or(Operand dst, Immediate src, int size) {
immediate_arithmetic_op(0x1, dst, src, size);
}
@@ -2349,28 +2299,28 @@ class Assembler : public AssemblerBase {
immediate_arithmetic_op(0x5, dst, src, size);
}
- void emit_sub(Register dst, const Operand& src, int size) {
+ void emit_sub(Register dst, Operand src, int size) {
arithmetic_op(0x2B, dst, src, size);
}
- void emit_sub(const Operand& dst, Register src, int size) {
+ void emit_sub(Operand dst, Register src, int size) {
arithmetic_op(0x29, src, dst, size);
}
- void emit_sub(const Operand& dst, Immediate src, int size) {
+ void emit_sub(Operand dst, Immediate src, int size) {
immediate_arithmetic_op(0x5, dst, src, size);
}
void emit_test(Register dst, Register src, int size);
void emit_test(Register reg, Immediate mask, int size);
- void emit_test(const Operand& op, Register reg, int size);
- void emit_test(const Operand& op, Immediate mask, int size);
- void emit_test(Register reg, const Operand& op, int size) {
+ void emit_test(Operand op, Register reg, int size);
+ void emit_test(Operand op, Immediate mask, int size);
+ void emit_test(Register reg, Operand op, int size) {
return emit_test(op, reg, size);
}
void emit_xchg(Register dst, Register src, int size);
- void emit_xchg(Register dst, const Operand& src, int size);
+ void emit_xchg(Register dst, Operand src, int size);
void emit_xor(Register dst, Register src, int size) {
if (size == kInt64Size && dst.code() == src.code()) {
@@ -2382,7 +2332,7 @@ class Assembler : public AssemblerBase {
}
}
- void emit_xor(Register dst, const Operand& src, int size) {
+ void emit_xor(Register dst, Operand src, int size) {
arithmetic_op(0x33, dst, src, size);
}
@@ -2390,25 +2340,23 @@ class Assembler : public AssemblerBase {
immediate_arithmetic_op(0x6, dst, src, size);
}
- void emit_xor(const Operand& dst, Immediate src, int size) {
+ void emit_xor(Operand dst, Immediate src, int size) {
immediate_arithmetic_op(0x6, dst, src, size);
}
- void emit_xor(const Operand& dst, Register src, int size) {
+ void emit_xor(Operand dst, Register src, int size) {
arithmetic_op(0x31, src, dst, size);
}
// Most BMI instructions are similar.
void bmi1q(byte op, Register reg, Register vreg, Register rm);
- void bmi1q(byte op, Register reg, Register vreg, const Operand& rm);
+ void bmi1q(byte op, Register reg, Register vreg, Operand rm);
void bmi1l(byte op, Register reg, Register vreg, Register rm);
- void bmi1l(byte op, Register reg, Register vreg, const Operand& rm);
+ void bmi1l(byte op, Register reg, Register vreg, Operand rm);
void bmi2q(SIMDPrefix pp, byte op, Register reg, Register vreg, Register rm);
- void bmi2q(SIMDPrefix pp, byte op, Register reg, Register vreg,
- const Operand& rm);
+ void bmi2q(SIMDPrefix pp, byte op, Register reg, Register vreg, Operand rm);
void bmi2l(SIMDPrefix pp, byte op, Register reg, Register vreg, Register rm);
- void bmi2l(SIMDPrefix pp, byte op, Register reg, Register vreg,
- const Operand& rm);
+ void bmi2l(SIMDPrefix pp, byte op, Register reg, Register vreg, Operand rm);
// record the position of jmp/jcc instruction
void record_farjmp_position(Label* L, int pos);
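
The assembler-x64.h changes above consistently switch Operand parameters from const reference to by value. A standalone sketch of why that is safe and cheap for a small, trivially copyable operand type follows; ToyOperand and the emit_* functions are stand-ins for illustration, not V8's classes.

// Minimal standalone sketch (not V8 code): a small, trivially copyable
// operand-like struct can be passed by value about as cheaply as by
// const reference, which is the pattern the header change above adopts.
#include <cstdint>
#include <cstdio>
#include <type_traits>

struct ToyOperand {
  // Stand-in for an x64 memory operand encoding: ModR/M, optional SIB,
  // and up to a 4-byte displacement.
  uint8_t buf[6];
  uint8_t len;
};

static_assert(std::is_trivially_copyable<ToyOperand>::value,
              "by-value passing relies on trivial copyability");

// Old style: const reference.
void emit_by_ref(const ToyOperand& op) {
  std::printf("len=%u\n", static_cast<unsigned>(op.len));
}

// New style: by value; for a type this small the copy is essentially free
// and the callee no longer aliases the caller's object.
void emit_by_value(ToyOperand op) {
  std::printf("len=%u\n", static_cast<unsigned>(op.len));
}

int main() {
  ToyOperand op{{0x04, 0x25, 0, 0, 0, 0}, 2};
  emit_by_ref(op);
  emit_by_value(op);
  return 0;
}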
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 27061c1e2b..2ff00f0402 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -135,7 +135,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Label fast_power, try_arithmetic_simplification;
// Detect integer exponents stored as double.
__ DoubleToI(exponent, double_exponent, double_scratch,
- TREAT_MINUS_ZERO_AS_ZERO, &try_arithmetic_simplification,
&try_arithmetic_simplification,
&try_arithmetic_simplification);
__ jmp(&int_exponent);
@@ -425,6 +424,12 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
__ bind(&skip);
+ // Reset the masking register. This is done independent of the underlying
+ // feature flag {FLAG_branch_load_poisoning} to make the snapshot work with
+ // both configurations. It is safe to always do this, because the underlying
+ // register is caller-saved and can be arbitrarily clobbered.
+ __ ResetSpeculationPoisonRegister();
+
// Compute the handler entry address and jump to it.
__ movp(rdi, masm->ExternalOperand(pending_handler_entrypoint_address));
__ jmp(rdi);
@@ -609,7 +614,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// Call the entry hook function.
__ Move(rax, FUNCTION_ADDR(isolate()->function_entry_hook()),
- Assembler::RelocInfoNone());
+ RelocInfo::NONE);
AllowExternalCallThatCantCauseGC scope(masm);
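
The CEntryStub hunk above adds a comment about unconditionally resetting the masking register. A conceptual, standalone sketch of the load-poisoning idea it refers to follows; a plain variable stands in for the dedicated poison register, and this is not V8's implementation.

#include <cstdint>
#include <cstdio>

uint64_t poison = ~0ull;  // all-ones means "no poisoning"

uint64_t guarded_load(const uint64_t* p) {
  // Every dependent load is masked with the poison value; on a misspeculated
  // path the mask would be 0 and the loaded value collapses to 0.
  return *p & poison;
}

int main() {
  uint64_t v = 0x1234;
  std::printf("%llx\n", (unsigned long long)guarded_load(&v));  // 1234
  poison = 0;                                                   // simulate a "poisoned" path
  std::printf("%llx\n", (unsigned long long)guarded_load(&v));  // 0
  poison = ~0ull;  // analogue of ResetSpeculationPoisonRegister(): always safe to restore
  return 0;
}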
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 25a74b98fc..ee2cfd5e8b 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -28,9 +28,9 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
+ DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICache(isolate, buffer, allocated);
+ Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
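
CreateSqrtFunction above emits code into a buffer, flips the page to read-execute, and flushes the instruction cache before handing out a function pointer. A standalone POSIX/x86-64 sketch of that sequence follows; the byte sequence and page handling are illustrative, not V8's.

#include <sys/mman.h>
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  const size_t size = 4096;
  void* buf = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (buf == MAP_FAILED) return 1;

  // mov eax, 42 ; ret  -- a trivial "generated" function.
  const uint8_t code[] = {0xB8, 0x2A, 0x00, 0x00, 0x00, 0xC3};
  std::memcpy(buf, code, sizeof(code));

  // Rough equivalent of SetPermissions(..., kReadExecute) then FlushICache.
  if (mprotect(buf, size, PROT_READ | PROT_EXEC) != 0) return 1;
  __builtin___clear_cache(static_cast<char*>(buf),
                          static_cast<char*>(buf) + sizeof(code));

  auto fn = reinterpret_cast<int (*)()>(buf);
  std::printf("%d\n", fn());  // prints 42
  munmap(buf, size);
  return 0;
}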
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 247f5e889e..91cee67bdd 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -10,6 +10,7 @@
#include "src/base/compiler-specific.h"
#include "src/base/lazy-instance.h"
+#include "src/base/v8-fallthrough.h"
#include "src/disasm.h"
#include "src/x64/sse-instr.h"
@@ -1840,6 +1841,8 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else if (opcode == 0xD9) {
mnemonic = "psubusw";
} else if (opcode == 0xDA) {
+ mnemonic = "pand";
+ } else if (opcode == 0xDB) {
mnemonic = "pminub";
} else if (opcode == 0xDC) {
mnemonic = "paddusb";
@@ -1857,6 +1860,8 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
mnemonic = "psubsw";
} else if (opcode == 0xEA) {
mnemonic = "pminsw";
+ } else if (opcode == 0xEB) {
+ mnemonic = "por";
} else if (opcode == 0xEC) {
mnemonic = "paddsb";
} else if (opcode == 0xED) {
@@ -2703,7 +2708,8 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
break;
case 0xF6:
- byte_size_operand_ = true; // fall through
+ byte_size_operand_ = true;
+ V8_FALLTHROUGH;
case 0xF7:
data += F6F7Instruction(data);
break;
@@ -2814,6 +2820,11 @@ int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
return d.InstructionDecode(buffer, instruction);
}
+int Disassembler::InstructionDecodeForTesting(v8::internal::Vector<char> buffer,
+ byte* instruction) {
+ DisassemblerX64 d(converter_, ABORT_ON_UNIMPLEMENTED_OPCODE);
+ return d.InstructionDecode(buffer, instruction);
+}
// The X64 assembler does not use constant pools.
int Disassembler::ConstantPoolSizeAt(byte* instruction) {
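
The 0xF6/0xF7 hunk above replaces a "// fall through" comment with the V8_FALLTHROUGH macro. A minimal sketch of the annotated-fallthrough pattern using standard C++17 [[fallthrough]]; the macro's exact expansion is compiler-specific and not reproduced here.

#include <cstdio>

int decode(int opcode, bool* byte_size_operand) {
  switch (opcode) {
    case 0xF6:
      *byte_size_operand = true;
      [[fallthrough]];  // intentional: 0xF6 and 0xF7 share the same handler
    case 0xF7:
      return 1;  // pretend one extra operand byte was consumed
    default:
      return 0;
  }
}

int main() {
  bool byte_op = false;
  int consumed = decode(0xF6, &byte_op);
  std::printf("%d %d\n", consumed, byte_op ? 1 : 0);  // 1 1
  return 0;
}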
diff --git a/deps/v8/src/x64/frame-constants-x64.h b/deps/v8/src/x64/frame-constants-x64.h
index 95bad4c01a..07d2d1a8b1 100644
--- a/deps/v8/src/x64/frame-constants-x64.h
+++ b/deps/v8/src/x64/frame-constants-x64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_X64_FRAMES_X64_H_
-#define V8_X64_FRAMES_X64_H_
+#ifndef V8_X64_FRAME_CONSTANTS_X64_H_
+#define V8_X64_FRAME_CONSTANTS_X64_H_
namespace v8 {
namespace internal {
@@ -55,4 +55,4 @@ class JavaScriptFrameConstants : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_X64_FRAMES_X64_H_
+#endif // V8_X64_FRAME_CONSTANTS_X64_H_
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/x64/interface-descriptors-x64.cc
index 22bad696d2..be32df3164 100644
--- a/deps/v8/src/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/x64/interface-descriptors-x64.cc
@@ -69,13 +69,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return rax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return rbx; }
-void FastNewClosureDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // SharedFunctionInfo, vector, slot index.
- Register registers[] = {rbx, rcx, rdx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rbx};
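
interface-descriptors-x64.cc drops the FastNewClosure platform registers and keeps the pattern visible in TypeofDescriptor: a descriptor records which machine registers carry which parameters. A standalone sketch with stand-in types (not V8's CallInterfaceDescriptorData) follows.

#include <cstddef>
#include <cstdio>
#include <vector>

enum Register { rax, rbx, rcx, rdx };

struct CallDescriptorData {
  std::vector<Register> registers;
  void InitializePlatformSpecific(size_t count, const Register* regs) {
    registers.assign(regs, regs + count);
  }
};

int main() {
  // Mirrors e.g. TypeofDescriptor above: a single argument passed in rbx.
  Register regs[] = {rbx};
  CallDescriptorData data;
  data.InitializePlatformSpecific(sizeof(regs) / sizeof(regs[0]), regs);
  std::printf("%zu parameter register(s)\n", data.registers.size());
  return 0;
}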
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 5019be3727..e09321e183 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -15,6 +15,7 @@
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
#include "src/heap/heap-inl.h"
+#include "src/instruction-stream.h"
#include "src/objects-inl.h"
#include "src/register-configuration.h"
#include "src/x64/assembler-x64.h"
@@ -187,7 +188,7 @@ void MacroAssembler::PushAddress(ExternalReference source) {
if (is_int32(address) && !serializer_enabled()) {
if (emit_debug_code()) {
Move(kScratchRegister, reinterpret_cast<Address>(kZapValue),
- Assembler::RelocInfoNone());
+ RelocInfo::NONE);
}
Push(Immediate(static_cast<int32_t>(address)));
return;
@@ -213,8 +214,7 @@ void TurboAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
(index << kPointerSizeLog2) - kRootRegisterBias));
}
-void TurboAssembler::CompareRoot(const Operand& with,
- Heap::RootListIndex index) {
+void TurboAssembler::CompareRoot(Operand with, Heap::RootListIndex index) {
DCHECK(root_array_available_);
DCHECK(!with.AddressUsesRegister(kScratchRegister));
LoadRoot(kScratchRegister, index);
@@ -256,9 +256,8 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- Move(value, reinterpret_cast<Address>(kZapValue),
- Assembler::RelocInfoNone());
- Move(dst, reinterpret_cast<Address>(kZapValue), Assembler::RelocInfoNone());
+ Move(value, reinterpret_cast<Address>(kZapValue), RelocInfo::NONE);
+ Move(dst, reinterpret_cast<Address>(kZapValue), RelocInfo::NONE);
}
}
@@ -388,10 +387,8 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- Move(address, reinterpret_cast<Address>(kZapValue),
- Assembler::RelocInfoNone());
- Move(value, reinterpret_cast<Address>(kZapValue),
- Assembler::RelocInfoNone());
+ Move(address, reinterpret_cast<Address>(kZapValue), RelocInfo::NONE);
+ Move(value, reinterpret_cast<Address>(kZapValue), RelocInfo::NONE);
}
}
@@ -616,7 +613,7 @@ void TurboAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) {
}
}
-void TurboAssembler::Cvtss2sd(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Cvtss2sd(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvtss2sd(dst, dst, src);
@@ -634,7 +631,7 @@ void TurboAssembler::Cvtsd2ss(XMMRegister dst, XMMRegister src) {
}
}
-void TurboAssembler::Cvtsd2ss(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Cvtsd2ss(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvtsd2ss(dst, dst, src);
@@ -654,7 +651,7 @@ void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
}
}
-void TurboAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vxorpd(dst, dst, dst);
@@ -676,7 +673,7 @@ void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Register src) {
}
}
-void TurboAssembler::Cvtlsi2ss(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vxorps(dst, dst, dst);
@@ -698,7 +695,7 @@ void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Register src) {
}
}
-void TurboAssembler::Cvtqsi2ss(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vxorps(dst, dst, dst);
@@ -720,7 +717,7 @@ void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Register src) {
}
}
-void TurboAssembler::Cvtqsi2sd(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vxorpd(dst, dst, dst);
@@ -775,7 +772,7 @@ void TurboAssembler::Cvttss2si(Register dst, XMMRegister src) {
}
}
-void TurboAssembler::Cvttss2si(Register dst, const Operand& src) {
+void TurboAssembler::Cvttss2si(Register dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvttss2si(dst, src);
@@ -793,7 +790,7 @@ void TurboAssembler::Cvttsd2si(Register dst, XMMRegister src) {
}
}
-void TurboAssembler::Cvttsd2si(Register dst, const Operand& src) {
+void TurboAssembler::Cvttsd2si(Register dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvttsd2si(dst, src);
@@ -811,7 +808,7 @@ void TurboAssembler::Cvttss2siq(Register dst, XMMRegister src) {
}
}
-void TurboAssembler::Cvttss2siq(Register dst, const Operand& src) {
+void TurboAssembler::Cvttss2siq(Register dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvttss2siq(dst, src);
@@ -829,7 +826,7 @@ void TurboAssembler::Cvttsd2siq(Register dst, XMMRegister src) {
}
}
-void TurboAssembler::Cvttsd2siq(Register dst, const Operand& src) {
+void TurboAssembler::Cvttsd2siq(Register dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvttsd2siq(dst, src);
@@ -838,8 +835,7 @@ void TurboAssembler::Cvttsd2siq(Register dst, const Operand& src) {
}
}
-
-void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
+void MacroAssembler::Load(Register dst, Operand src, Representation r) {
DCHECK(!r.IsDouble());
if (r.IsInteger8()) {
movsxbq(dst, src);
@@ -856,8 +852,7 @@ void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
}
}
-
-void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
+void MacroAssembler::Store(Operand dst, Register src, Representation r) {
DCHECK(!r.IsDouble());
if (r.IsInteger8() || r.IsUInteger8()) {
movb(dst, src);
@@ -887,7 +882,7 @@ void TurboAssembler::Set(Register dst, int64_t x) {
}
}
-void TurboAssembler::Set(const Operand& dst, intptr_t x) {
+void TurboAssembler::Set(Operand dst, intptr_t x) {
if (kPointerSize == kInt64Size) {
if (is_int32(x)) {
movp(dst, Immediate(static_cast<int32_t>(x)));
@@ -921,7 +916,7 @@ void TurboAssembler::Move(Register dst, Smi* source) {
if (value == 0) {
xorl(dst, dst);
} else {
- Move(dst, source, Assembler::RelocInfoNone());
+ Move(dst, source, RelocInfo::NONE);
}
}
@@ -947,7 +942,7 @@ void TurboAssembler::SmiToInteger32(Register dst, Register src) {
}
}
-void TurboAssembler::SmiToInteger32(Register dst, const Operand& src) {
+void TurboAssembler::SmiToInteger32(Register dst, Operand src) {
if (SmiValuesAre32Bits()) {
movl(dst, Operand(src, kSmiShift / kBitsPerByte));
} else {
@@ -993,22 +988,19 @@ void MacroAssembler::Cmp(Register dst, Smi* src) {
}
}
-
-void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
+void MacroAssembler::SmiCompare(Register dst, Operand src) {
AssertSmi(dst);
AssertSmi(src);
cmpp(dst, src);
}
-
-void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
+void MacroAssembler::SmiCompare(Operand dst, Register src) {
AssertSmi(dst);
AssertSmi(src);
cmpp(dst, src);
}
-
-void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
+void MacroAssembler::SmiCompare(Operand dst, Smi* src) {
AssertSmi(dst);
if (SmiValuesAre32Bits()) {
cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
@@ -1018,8 +1010,7 @@ void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
}
}
-
-void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
+void MacroAssembler::Cmp(Operand dst, Smi* src) {
// The Operand cannot use the smi register.
Register smi_reg = GetSmiConstant(src);
DCHECK(!dst.AddressUsesRegister(smi_reg));
@@ -1033,7 +1024,7 @@ Condition TurboAssembler::CheckSmi(Register src) {
return zero;
}
-Condition TurboAssembler::CheckSmi(const Operand& src) {
+Condition TurboAssembler::CheckSmi(Operand src) {
STATIC_ASSERT(kSmiTag == 0);
testb(src, Immediate(kSmiTagMask));
return zero;
@@ -1059,7 +1050,7 @@ void MacroAssembler::JumpIfNotSmi(Operand src, Label* on_not_smi,
j(NegateCondition(smi), on_not_smi, near_jump);
}
-void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
+void MacroAssembler::SmiAddConstant(Operand dst, Smi* constant) {
if (constant->value() != 0) {
if (SmiValuesAre32Bits()) {
addl(Operand(dst, kSmiShift / kBitsPerByte),
@@ -1167,10 +1158,10 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
Pcmpeqd(dst, dst);
} else if (pop + ntz == 64) {
Pcmpeqd(dst, dst);
- Psllq(dst, ntz);
+ Psllq(dst, static_cast<byte>(ntz));
} else if (pop + nlz == 64) {
Pcmpeqd(dst, dst);
- Psrlq(dst, nlz);
+ Psrlq(dst, static_cast<byte>(nlz));
} else {
uint32_t lower = static_cast<uint32_t>(src);
uint32_t upper = static_cast<uint32_t>(src >> 32);
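
The Move(XMMRegister, uint64_t) hunk above keeps the trick of materializing certain constants from an all-ones register: when popcount plus trailing zeros equals 64, setting every bit (pcmpeqd) and shifting left by the trailing-zero count reproduces the value. A small worked check of that identity, in scalar code rather than SSE:

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t src = 0xFFFFFFFF00000000ull;   // popcount 32, trailing zeros 32
  int ntz = __builtin_ctzll(src);         // 32 (src is known non-zero here)
  uint64_t materialized = ~0ull << ntz;   // "pcmpeqd" (all ones) then "psllq ntz"
  std::printf("%d\n", materialized == src);  // 1
  return 0;
}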
@@ -1184,260 +1175,6 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
}
}
-void TurboAssembler::Movaps(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovaps(dst, src);
- } else {
- movaps(dst, src);
- }
-}
-
-void TurboAssembler::Movups(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovups(dst, src);
- } else {
- movups(dst, src);
- }
-}
-
-void TurboAssembler::Movups(XMMRegister dst, const Operand& src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovups(dst, src);
- } else {
- movups(dst, src);
- }
-}
-
-void TurboAssembler::Movups(const Operand& dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovups(dst, src);
- } else {
- movups(dst, src);
- }
-}
-
-void TurboAssembler::Movapd(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovapd(dst, src);
- } else {
- movapd(dst, src);
- }
-}
-
-void TurboAssembler::Movsd(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovsd(dst, dst, src);
- } else {
- movsd(dst, src);
- }
-}
-
-void TurboAssembler::Movsd(XMMRegister dst, const Operand& src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovsd(dst, src);
- } else {
- movsd(dst, src);
- }
-}
-
-void TurboAssembler::Movsd(const Operand& dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovsd(dst, src);
- } else {
- movsd(dst, src);
- }
-}
-
-void TurboAssembler::Movss(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovss(dst, dst, src);
- } else {
- movss(dst, src);
- }
-}
-
-void TurboAssembler::Movss(XMMRegister dst, const Operand& src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovss(dst, src);
- } else {
- movss(dst, src);
- }
-}
-
-void TurboAssembler::Movss(const Operand& dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovss(dst, src);
- } else {
- movss(dst, src);
- }
-}
-
-void TurboAssembler::Movd(XMMRegister dst, Register src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovd(dst, src);
- } else {
- movd(dst, src);
- }
-}
-
-void TurboAssembler::Movd(XMMRegister dst, const Operand& src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovd(dst, src);
- } else {
- movd(dst, src);
- }
-}
-
-void TurboAssembler::Movd(Register dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovd(dst, src);
- } else {
- movd(dst, src);
- }
-}
-
-void TurboAssembler::Movq(XMMRegister dst, Register src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovq(dst, src);
- } else {
- movq(dst, src);
- }
-}
-
-void TurboAssembler::Movq(Register dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovq(dst, src);
- } else {
- movq(dst, src);
- }
-}
-
-void TurboAssembler::Movmskps(Register dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovmskps(dst, src);
- } else {
- movmskps(dst, src);
- }
-}
-
-void TurboAssembler::Movmskpd(Register dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovmskpd(dst, src);
- } else {
- movmskpd(dst, src);
- }
-}
-
-void TurboAssembler::Xorps(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vxorps(dst, dst, src);
- } else {
- xorps(dst, src);
- }
-}
-
-void TurboAssembler::Xorps(XMMRegister dst, const Operand& src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vxorps(dst, dst, src);
- } else {
- xorps(dst, src);
- }
-}
-
-void TurboAssembler::Roundss(XMMRegister dst, XMMRegister src,
- RoundingMode mode) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vroundss(dst, dst, src, mode);
- } else {
- roundss(dst, src, mode);
- }
-}
-
-void TurboAssembler::Roundsd(XMMRegister dst, XMMRegister src,
- RoundingMode mode) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vroundsd(dst, dst, src, mode);
- } else {
- roundsd(dst, src, mode);
- }
-}
-
-void TurboAssembler::Sqrtsd(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vsqrtsd(dst, dst, src);
- } else {
- sqrtsd(dst, src);
- }
-}
-
-void TurboAssembler::Sqrtsd(XMMRegister dst, const Operand& src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vsqrtsd(dst, dst, src);
- } else {
- sqrtsd(dst, src);
- }
-}
-
-void TurboAssembler::Ucomiss(XMMRegister src1, XMMRegister src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vucomiss(src1, src2);
- } else {
- ucomiss(src1, src2);
- }
-}
-
-void TurboAssembler::Ucomiss(XMMRegister src1, const Operand& src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vucomiss(src1, src2);
- } else {
- ucomiss(src1, src2);
- }
-}
-
-void TurboAssembler::Ucomisd(XMMRegister src1, XMMRegister src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vucomisd(src1, src2);
- } else {
- ucomisd(src1, src2);
- }
-}
-
-void TurboAssembler::Ucomisd(XMMRegister src1, const Operand& src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vucomisd(src1, src2);
- } else {
- ucomisd(src1, src2);
- }
-}
-
// ----------------------------------------------------------------------------
void MacroAssembler::Absps(XMMRegister dst) {
@@ -1470,8 +1207,7 @@ void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
}
}
-
-void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
+void MacroAssembler::Cmp(Operand dst, Handle<Object> source) {
AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
Cmp(dst, Smi::cast(*source));
@@ -1491,7 +1227,7 @@ void TurboAssembler::Move(Register result, Handle<HeapObject> object,
movp(result, reinterpret_cast<void*>(object.address()), rmode);
}
-void TurboAssembler::Move(const Operand& dst, Handle<HeapObject> object,
+void TurboAssembler::Move(Operand dst, Handle<HeapObject> object,
RelocInfo::Mode rmode) {
Move(kScratchRegister, object, rmode);
movp(dst, kScratchRegister);
@@ -1528,7 +1264,7 @@ void TurboAssembler::Push(Register src) {
}
}
-void TurboAssembler::Push(const Operand& src) {
+void TurboAssembler::Push(Operand src) {
if (kPointerSize == kInt64Size) {
pushq(src);
} else {
@@ -1538,8 +1274,7 @@ void TurboAssembler::Push(const Operand& src) {
}
}
-
-void MacroAssembler::PushQuad(const Operand& src) {
+void MacroAssembler::PushQuad(Operand src) {
if (kPointerSize == kInt64Size) {
pushq(src);
} else {
@@ -1579,8 +1314,7 @@ void MacroAssembler::Pop(Register dst) {
}
}
-
-void MacroAssembler::Pop(const Operand& dst) {
+void MacroAssembler::Pop(Operand dst) {
if (kPointerSize == kInt64Size) {
popq(dst);
} else {
@@ -1596,8 +1330,7 @@ void MacroAssembler::Pop(const Operand& dst) {
}
}
-
-void MacroAssembler::PopQuad(const Operand& dst) {
+void MacroAssembler::PopQuad(Operand dst) {
if (kPointerSize == kInt64Size) {
popq(dst);
} else {
@@ -1612,8 +1345,7 @@ void MacroAssembler::Jump(ExternalReference ext) {
jmp(kScratchRegister);
}
-
-void MacroAssembler::Jump(const Operand& op) {
+void MacroAssembler::Jump(Operand op) {
if (kPointerSize == kInt64Size) {
jmp(op);
} else {
@@ -1634,6 +1366,12 @@ void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
jmp(code_object, rmode);
}
+void MacroAssembler::JumpToInstructionStream(const InstructionStream* stream) {
+ Address bytes_address = reinterpret_cast<Address>(stream->bytes());
+ Move(kOffHeapTrampolineRegister, bytes_address, RelocInfo::NONE);
+ jmp(kOffHeapTrampolineRegister);
+}
+
int TurboAssembler::CallSize(ExternalReference ext) {
// Opcode for call kScratchRegister is: Rex.B FF D4 (three bytes).
return LoadAddressSize(ext) +
@@ -1649,7 +1387,7 @@ void TurboAssembler::Call(ExternalReference ext) {
DCHECK_EQ(end_position, pc_offset());
}
-void TurboAssembler::Call(const Operand& op) {
+void TurboAssembler::Call(Operand op) {
if (kPointerSize == kInt64Size && !CpuFeatures::IsSupported(ATOM)) {
call(op);
} else {
@@ -1751,7 +1489,7 @@ void TurboAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
}
}
-void TurboAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
+void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8) {
DCHECK(imm8 == 0 || imm8 == 1);
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope sse_scope(this, SSE4_1);
@@ -1781,7 +1519,7 @@ void TurboAssembler::Lzcntl(Register dst, Register src) {
xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x
}
-void TurboAssembler::Lzcntl(Register dst, const Operand& src) {
+void TurboAssembler::Lzcntl(Register dst, Operand src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
lzcntl(dst, src);
@@ -1809,7 +1547,7 @@ void TurboAssembler::Lzcntq(Register dst, Register src) {
xorl(dst, Immediate(63)); // for x in [0..63], 63^x == 63 - x
}
-void TurboAssembler::Lzcntq(Register dst, const Operand& src) {
+void TurboAssembler::Lzcntq(Register dst, Operand src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
lzcntq(dst, src);
@@ -1837,7 +1575,7 @@ void TurboAssembler::Tzcntq(Register dst, Register src) {
bind(&not_zero_src);
}
-void TurboAssembler::Tzcntq(Register dst, const Operand& src) {
+void TurboAssembler::Tzcntq(Register dst, Operand src) {
if (CpuFeatures::IsSupported(BMI1)) {
CpuFeatureScope scope(this, BMI1);
tzcntq(dst, src);
@@ -1864,7 +1602,7 @@ void TurboAssembler::Tzcntl(Register dst, Register src) {
bind(&not_zero_src);
}
-void TurboAssembler::Tzcntl(Register dst, const Operand& src) {
+void TurboAssembler::Tzcntl(Register dst, Operand src) {
if (CpuFeatures::IsSupported(BMI1)) {
CpuFeatureScope scope(this, BMI1);
tzcntl(dst, src);
@@ -1886,7 +1624,7 @@ void TurboAssembler::Popcntl(Register dst, Register src) {
UNREACHABLE();
}
-void TurboAssembler::Popcntl(Register dst, const Operand& src) {
+void TurboAssembler::Popcntl(Register dst, Operand src) {
if (CpuFeatures::IsSupported(POPCNT)) {
CpuFeatureScope scope(this, POPCNT);
popcntl(dst, src);
@@ -1904,7 +1642,7 @@ void TurboAssembler::Popcntq(Register dst, Register src) {
UNREACHABLE();
}
-void TurboAssembler::Popcntq(Register dst, const Operand& src) {
+void TurboAssembler::Popcntq(Register dst, Operand src) {
if (CpuFeatures::IsSupported(POPCNT)) {
CpuFeatureScope scope(this, POPCNT);
popcntq(dst, src);
@@ -2035,29 +1773,13 @@ void TurboAssembler::SlowTruncateToIDelayed(Zone* zone, Register result_reg) {
}
void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
- XMMRegister scratch,
- MinusZeroMode minus_zero_mode,
- Label* lost_precision, Label* is_nan,
- Label* minus_zero, Label::Distance dst) {
+ XMMRegister scratch, Label* lost_precision,
+ Label* is_nan, Label::Distance dst) {
Cvttsd2si(result_reg, input_reg);
Cvtlsi2sd(kScratchDoubleReg, result_reg);
Ucomisd(kScratchDoubleReg, input_reg);
j(not_equal, lost_precision, dst);
j(parity_even, is_nan, dst); // NaN.
- if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
- Label done;
- // The integer converted back is equal to the original. We
- // only have to test if we got -0 as an input.
- testl(result_reg, result_reg);
- j(not_zero, &done, Label::kNear);
- Movmskpd(result_reg, input_reg);
- // Bit 0 contains the sign of the double in input_reg.
- // If input was positive, we are ok and return 0, otherwise
- // jump to minus_zero.
- andl(result_reg, Immediate(1));
- j(not_zero, minus_zero, dst);
- bind(&done);
- }
}
@@ -2076,8 +1798,7 @@ void MacroAssembler::AssertSmi(Register object) {
}
}
-
-void MacroAssembler::AssertSmi(const Operand& object) {
+void MacroAssembler::AssertSmi(Operand object) {
if (emit_debug_code()) {
Condition is_smi = CheckSmi(object);
Check(is_smi, AbortReason::kOperandIsNotASmi);
@@ -2310,6 +2031,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
+ static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
movp(rcx, FieldOperand(function, JSFunction::kCodeOffset));
addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
@@ -2387,12 +2109,31 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
- Label skip_hook;
+ Label skip_hook, call_hook;
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(isolate());
+ Operand debug_is_active_operand = ExternalOperand(debug_is_active);
+ cmpb(debug_is_active_operand, Immediate(0));
+ j(equal, &skip_hook);
+
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
Operand debug_hook_active_operand = ExternalOperand(debug_hook_active);
cmpb(debug_hook_active_operand, Immediate(0));
- j(equal, &skip_hook);
+ j(not_equal, &call_hook);
+
+ movp(kScratchRegister,
+ FieldOperand(fun, JSFunction::kSharedFunctionInfoOffset));
+ movp(kScratchRegister,
+ FieldOperand(kScratchRegister, SharedFunctionInfo::kDebugInfoOffset));
+ JumpIfSmi(kScratchRegister, &skip_hook);
+ movp(kScratchRegister,
+ FieldOperand(kScratchRegister, DebugInfo::kFlagsOffset));
+ SmiToInteger32(kScratchRegister, kScratchRegister);
+ testp(kScratchRegister, Immediate(DebugInfo::kBreakAtEntry));
+ j(zero, &skip_hook);
+
+ bind(&call_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -2446,13 +2187,13 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
if (type == StackFrame::INTERNAL) {
Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
Push(kScratchRegister);
- }
- if (emit_debug_code()) {
- Move(kScratchRegister,
- isolate()->factory()->undefined_value(),
- RelocInfo::EMBEDDED_OBJECT);
- cmpp(Operand(rsp, 0), kScratchRegister);
- Check(not_equal, AbortReason::kCodeObjectNotProperlyPatched);
+ // Check at runtime that this code object was patched correctly.
+ if (emit_debug_code()) {
+ Move(kScratchRegister, isolate()->factory()->undefined_value(),
+ RelocInfo::EMBEDDED_OBJECT);
+ cmpp(Operand(rsp, 0), kScratchRegister);
+ Check(not_equal, AbortReason::kCodeObjectNotProperlyPatched);
+ }
}
}
@@ -2739,6 +2480,22 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
}
j(cc, condition_met, condition_met_distance);
}
+
+void TurboAssembler::ComputeCodeStartAddress(Register dst) {
+ Label current;
+ bind(&current);
+ int pc = pc_offset();
+ // Load effective address to get the address of the current instruction.
+ leaq(dst, Operand(&current));
+ if (pc != 0) {
+ subq(dst, Immediate(pc));
+ }
+}
+
+void TurboAssembler::ResetSpeculationPoisonRegister() {
+ Set(kSpeculationPoisonRegister, -1);
+}
+
} // namespace internal
} // namespace v8
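The DoubleToI change above keeps only the truncate, convert-back, and compare steps (the FAIL_ON_MINUS_ZERO path is gone). A rough C++ model of the remaining check, written as plain code rather than emitted x64; the enum and helper names are illustrative, and inputs are assumed to fit in int32 range:

#include <cmath>
#include <cstdint>
#include <cstdio>

enum class ToIStatus { kOk, kLostPrecision, kNaN };

// Mirrors the kept sequence: Cvttsd2si, Cvtlsi2sd, Ucomisd, then the
// not_equal / parity_even branches.
ToIStatus DoubleToI(double input, int32_t* result) {
  if (std::isnan(input)) return ToIStatus::kNaN;       // parity_even -> is_nan
  int32_t truncated = static_cast<int32_t>(input);     // Cvttsd2si
  double round_trip = static_cast<double>(truncated);  // Cvtlsi2sd
  if (round_trip != input) return ToIStatus::kLostPrecision;  // Ucomisd, not_equal
  *result = truncated;
  return ToIStatus::kOk;
}

int main() {
  int32_t v = 0;
  std::printf("42.0 -> %d\n", static_cast<int>(DoubleToI(42.0, &v)));  // kOk
  std::printf("42.5 -> %d\n", static_cast<int>(DoubleToI(42.5, &v)));  // kLostPrecision
  return 0;
}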
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 4ceab2cf9c..faa0462cd1 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -20,11 +20,13 @@ constexpr Register kReturnRegister2 = r8;
constexpr Register kJSFunctionRegister = rdi;
constexpr Register kContextRegister = rsi;
constexpr Register kAllocateSizeRegister = rdx;
+constexpr Register kSpeculationPoisonRegister = r9;
constexpr Register kInterpreterAccumulatorRegister = rax;
constexpr Register kInterpreterBytecodeOffsetRegister = r12;
constexpr Register kInterpreterBytecodeArrayRegister = r14;
constexpr Register kInterpreterDispatchTableRegister = r15;
constexpr Register kJavaScriptCallArgCountRegister = rax;
+constexpr Register kJavaScriptCallCodeStartRegister = rcx;
constexpr Register kJavaScriptCallNewTargetRegister = rdx;
constexpr Register kRuntimeCallFunctionRegister = rbx;
constexpr Register kRuntimeCallArgCountRegister = rax;
@@ -38,6 +40,7 @@ constexpr Register kRootRegister = r13; // callee save
// Actual value of root register is offset from the root array's start
// to take advantage of negative 8-bit displacement values.
constexpr int kRootRegisterBias = 128;
+constexpr Register kOffHeapTrampolineRegister = kScratchRegister;
// Convenience for platform-independent signatures.
typedef Operand MemOperand;
@@ -133,51 +136,83 @@ class TurboAssembler : public Assembler {
return code_object_;
}
-#define AVX_OP2_WITH_TYPE(macro_name, name, src_type) \
- void macro_name(XMMRegister dst, src_type src) { \
- if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope scope(this, AVX); \
- v##name(dst, dst, src); \
- } else { \
- name(dst, src); \
- } \
+ template <typename Dst, typename... Args>
+ struct AvxHelper {
+ Assembler* assm;
+ // Call a method where the AVX version expects the dst argument to be
+ // duplicated.
+ template <void (Assembler::*avx)(Dst, Dst, Args...),
+ void (Assembler::*no_avx)(Dst, Args...)>
+ void emit(Dst dst, Args... args) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(assm, AVX);
+ (assm->*avx)(dst, dst, args...);
+ } else {
+ (assm->*no_avx)(dst, args...);
+ }
+ }
+
+ // Call a method where the AVX version expects no duplicated dst argument.
+ template <void (Assembler::*avx)(Dst, Args...),
+ void (Assembler::*no_avx)(Dst, Args...)>
+ void emit(Dst dst, Args... args) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(assm, AVX);
+ (assm->*avx)(dst, args...);
+ } else {
+ (assm->*no_avx)(dst, args...);
+ }
+ }
+ };
+
+#define AVX_OP(macro_name, name) \
+ template <typename Dst, typename... Args> \
+ void macro_name(Dst dst, Args... args) { \
+ AvxHelper<Dst, Args...>{this} \
+ .template emit<&Assembler::v##name, &Assembler::name>(dst, args...); \
}
-#define AVX_OP2_X(macro_name, name) \
- AVX_OP2_WITH_TYPE(macro_name, name, XMMRegister)
-#define AVX_OP2_O(macro_name, name) \
- AVX_OP2_WITH_TYPE(macro_name, name, const Operand&)
-#define AVX_OP2_XO(macro_name, name) \
- AVX_OP2_X(macro_name, name) \
- AVX_OP2_O(macro_name, name)
-
- AVX_OP2_XO(Subsd, subsd)
- AVX_OP2_XO(Divss, divss)
- AVX_OP2_XO(Divsd, divsd)
- AVX_OP2_XO(Xorpd, xorpd)
- AVX_OP2_X(Pcmpeqd, pcmpeqd)
- AVX_OP2_WITH_TYPE(Psllq, psllq, byte)
- AVX_OP2_WITH_TYPE(Psrlq, psrlq, byte)
-
-#undef AVX_OP2_O
-#undef AVX_OP2_X
-#undef AVX_OP2_XO
-#undef AVX_OP2_WITH_TYPE
-
- void Xorps(XMMRegister dst, XMMRegister src);
- void Xorps(XMMRegister dst, const Operand& src);
-
- void Movd(XMMRegister dst, Register src);
- void Movd(XMMRegister dst, const Operand& src);
- void Movd(Register dst, XMMRegister src);
- void Movq(XMMRegister dst, Register src);
- void Movq(Register dst, XMMRegister src);
-
- void Movsd(XMMRegister dst, XMMRegister src);
- void Movsd(XMMRegister dst, const Operand& src);
- void Movsd(const Operand& dst, XMMRegister src);
- void Movss(XMMRegister dst, XMMRegister src);
- void Movss(XMMRegister dst, const Operand& src);
- void Movss(const Operand& dst, XMMRegister src);
+
+ AVX_OP(Subsd, subsd)
+ AVX_OP(Divss, divss)
+ AVX_OP(Divsd, divsd)
+ AVX_OP(Xorps, xorps)
+ AVX_OP(Xorpd, xorpd)
+ AVX_OP(Movd, movd)
+ AVX_OP(Movq, movq)
+ AVX_OP(Movaps, movaps)
+ AVX_OP(Movapd, movapd)
+ AVX_OP(Movups, movups)
+ AVX_OP(Movmskps, movmskps)
+ AVX_OP(Movmskpd, movmskpd)
+ AVX_OP(Movss, movss)
+ AVX_OP(Movsd, movsd)
+ AVX_OP(Pcmpeqd, pcmpeqd)
+ AVX_OP(Psllq, psllq)
+ AVX_OP(Psrlq, psrlq)
+ AVX_OP(Addsd, addsd)
+ AVX_OP(Mulsd, mulsd)
+ AVX_OP(Andps, andps)
+ AVX_OP(Andpd, andpd)
+ AVX_OP(Orpd, orpd)
+ AVX_OP(Cmpeqps, cmpeqps)
+ AVX_OP(Cmpltps, cmpltps)
+ AVX_OP(Cmpleps, cmpleps)
+ AVX_OP(Cmpneqps, cmpneqps)
+ AVX_OP(Cmpnltps, cmpnltps)
+ AVX_OP(Cmpnleps, cmpnleps)
+ AVX_OP(Cmpeqpd, cmpeqpd)
+ AVX_OP(Cmpltpd, cmpltpd)
+ AVX_OP(Cmplepd, cmplepd)
+ AVX_OP(Cmpneqpd, cmpneqpd)
+ AVX_OP(Cmpnltpd, cmpnltpd)
+ AVX_OP(Cmpnlepd, cmpnlepd)
+ AVX_OP(Roundss, roundss)
+ AVX_OP(Roundsd, roundsd)
+ AVX_OP(Sqrtsd, sqrtsd)
+ AVX_OP(Ucomiss, ucomiss)
+ AVX_OP(Ucomisd, ucomisd)
+
+#undef AVX_OP
void PushReturnAddressFrom(Register src) { pushq(src); }
void PopReturnAddressTo(Register dst) { popq(dst); }
@@ -190,25 +225,17 @@ class TurboAssembler : public Assembler {
// Load a register with a long value as efficiently as possible.
void Set(Register dst, int64_t x);
- void Set(const Operand& dst, intptr_t x);
+ void Set(Operand dst, intptr_t x);
// Operations on roots in the root-array.
void LoadRoot(Register destination, Heap::RootListIndex index);
- void LoadRoot(const Operand& destination, Heap::RootListIndex index) {
+ void LoadRoot(Operand destination, Heap::RootListIndex index) {
LoadRoot(kScratchRegister, index);
movp(destination, kScratchRegister);
}
- void Movups(XMMRegister dst, XMMRegister src);
- void Movups(XMMRegister dst, const Operand& src);
- void Movups(const Operand& dst, XMMRegister src);
- void Movapd(XMMRegister dst, XMMRegister src);
- void Movaps(XMMRegister dst, XMMRegister src);
- void Movmskpd(Register dst, XMMRegister src);
- void Movmskps(Register dst, XMMRegister src);
-
void Push(Register src);
- void Push(const Operand& src);
+ void Push(Operand src);
void Push(Immediate value);
void Push(Smi* smi);
void Push(Handle<HeapObject> source);
@@ -239,23 +266,23 @@ class TurboAssembler : public Assembler {
Label::Distance condition_met_distance = Label::kFar);
void Cvtss2sd(XMMRegister dst, XMMRegister src);
- void Cvtss2sd(XMMRegister dst, const Operand& src);
+ void Cvtss2sd(XMMRegister dst, Operand src);
void Cvtsd2ss(XMMRegister dst, XMMRegister src);
- void Cvtsd2ss(XMMRegister dst, const Operand& src);
+ void Cvtsd2ss(XMMRegister dst, Operand src);
void Cvttsd2si(Register dst, XMMRegister src);
- void Cvttsd2si(Register dst, const Operand& src);
+ void Cvttsd2si(Register dst, Operand src);
void Cvttsd2siq(Register dst, XMMRegister src);
- void Cvttsd2siq(Register dst, const Operand& src);
+ void Cvttsd2siq(Register dst, Operand src);
void Cvttss2si(Register dst, XMMRegister src);
- void Cvttss2si(Register dst, const Operand& src);
+ void Cvttss2si(Register dst, Operand src);
void Cvttss2siq(Register dst, XMMRegister src);
- void Cvttss2siq(Register dst, const Operand& src);
+ void Cvttss2siq(Register dst, Operand src);
void Cvtqsi2ss(XMMRegister dst, Register src);
- void Cvtqsi2ss(XMMRegister dst, const Operand& src);
+ void Cvtqsi2ss(XMMRegister dst, Operand src);
void Cvtqsi2sd(XMMRegister dst, Register src);
- void Cvtqsi2sd(XMMRegister dst, const Operand& src);
+ void Cvtqsi2sd(XMMRegister dst, Operand src);
void Cvtlsi2ss(XMMRegister dst, Register src);
- void Cvtlsi2ss(XMMRegister dst, const Operand& src);
+ void Cvtlsi2ss(XMMRegister dst, Operand src);
void Cvtqui2ss(XMMRegister dst, Register src, Register tmp);
void Cvtqui2sd(XMMRegister dst, Register src, Register tmp);
@@ -263,35 +290,24 @@ class TurboAssembler : public Assembler {
// hinders register renaming and makes dependence chains longer. So we use
// xorpd to clear the dst register before cvtsi2sd to solve this issue.
void Cvtlsi2sd(XMMRegister dst, Register src);
- void Cvtlsi2sd(XMMRegister dst, const Operand& src);
-
- void Roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
- void Roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
-
- void Sqrtsd(XMMRegister dst, XMMRegister src);
- void Sqrtsd(XMMRegister dst, const Operand& src);
-
- void Ucomiss(XMMRegister src1, XMMRegister src2);
- void Ucomiss(XMMRegister src1, const Operand& src2);
- void Ucomisd(XMMRegister src1, XMMRegister src2);
- void Ucomisd(XMMRegister src1, const Operand& src2);
+ void Cvtlsi2sd(XMMRegister dst, Operand src);
void Lzcntq(Register dst, Register src);
- void Lzcntq(Register dst, const Operand& src);
+ void Lzcntq(Register dst, Operand src);
void Lzcntl(Register dst, Register src);
- void Lzcntl(Register dst, const Operand& src);
+ void Lzcntl(Register dst, Operand src);
void Tzcntq(Register dst, Register src);
- void Tzcntq(Register dst, const Operand& src);
+ void Tzcntq(Register dst, Operand src);
void Tzcntl(Register dst, Register src);
- void Tzcntl(Register dst, const Operand& src);
+ void Tzcntl(Register dst, Operand src);
void Popcntl(Register dst, Register src);
- void Popcntl(Register dst, const Operand& src);
+ void Popcntl(Register dst, Operand src);
void Popcntq(Register dst, Register src);
- void Popcntq(Register dst, const Operand& src);
+ void Popcntq(Register dst, Operand src);
// Is the value a tagged smi.
Condition CheckSmi(Register src);
- Condition CheckSmi(const Operand& src);
+ Condition CheckSmi(Operand src);
// Jump to label if the value is a tagged smi.
void JumpIfSmi(Register src, Label* on_smi,
@@ -299,7 +315,7 @@ class TurboAssembler : public Assembler {
void Move(Register dst, Smi* source);
- void Move(const Operand& dst, Smi* source) {
+ void Move(Operand dst, Smi* source) {
Register constant = GetSmiConstant(source);
movp(dst, constant);
}
@@ -319,7 +335,7 @@ class TurboAssembler : public Assembler {
void Move(Register dst, Handle<HeapObject> source,
RelocInfo::Mode rmode = RelocInfo::EMBEDDED_OBJECT);
- void Move(const Operand& dst, Handle<HeapObject> source,
+ void Move(Operand dst, Handle<HeapObject> source,
RelocInfo::Mode rmode = RelocInfo::EMBEDDED_OBJECT);
// Loads a pointer into a register with a relocation mode.
@@ -333,13 +349,13 @@ class TurboAssembler : public Assembler {
// Convert smi to 32-bit integer. I.e., not sign extended into
// high 32 bits of destination.
void SmiToInteger32(Register dst, Register src);
- void SmiToInteger32(Register dst, const Operand& src);
+ void SmiToInteger32(Register dst, Operand src);
// Loads the address of the external reference into the destination
// register.
void LoadAddress(Register destination, ExternalReference source);
- void Call(const Operand& op);
+ void Call(Operand op);
void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
void Call(Address destination, RelocInfo::Mode rmode);
void Call(ExternalReference ext);
@@ -365,7 +381,7 @@ class TurboAssembler : public Assembler {
// Opcode: REX_opt FF /2 m64
return (target.high_bit() != 0) ? 3 : 2;
}
- int CallSize(const Operand& target) {
+ int CallSize(Operand target) {
// Opcode: REX_opt FF /2 m64
return (target.requires_rex() ? 2 : 1) + target.operand_size();
}
@@ -377,10 +393,10 @@ class TurboAssembler : public Assembler {
// Non-SSE2 instructions.
void Pextrd(Register dst, XMMRegister src, int8_t imm8);
void Pinsrd(XMMRegister dst, Register src, int8_t imm8);
- void Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);
+ void Pinsrd(XMMRegister dst, Operand src, int8_t imm8);
void CompareRoot(Register with, Heap::RootListIndex index);
- void CompareRoot(const Operand& with, Heap::RootListIndex index);
+ void CompareRoot(Operand with, Heap::RootListIndex index);
// Generates function and stub prologue code.
void StubPrologue(StackFrame::Type type);
@@ -476,6 +492,12 @@ class TurboAssembler : public Assembler {
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
+ // Compute the start of the generated instruction stream from the current PC.
+ // This is an alternative to embedding the {CodeObject} handle as a reference.
+ void ComputeCodeStartAddress(Register dst);
+
+ void ResetSpeculationPoisonRegister();
+
protected:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
int smi_count = 0;
@@ -551,8 +573,7 @@ class MacroAssembler : public TurboAssembler {
CompareRoot(with, index);
j(equal, if_equal, if_equal_distance);
}
- void JumpIfRoot(const Operand& with, Heap::RootListIndex index,
- Label* if_equal,
+ void JumpIfRoot(Operand with, Heap::RootListIndex index, Label* if_equal,
Label::Distance if_equal_distance = Label::kFar) {
CompareRoot(with, index);
j(equal, if_equal, if_equal_distance);
@@ -565,7 +586,7 @@ class MacroAssembler : public TurboAssembler {
CompareRoot(with, index);
j(not_equal, if_not_equal, if_not_equal_distance);
}
- void JumpIfNotRoot(const Operand& with, Heap::RootListIndex index,
+ void JumpIfNotRoot(Operand with, Heap::RootListIndex index,
Label* if_not_equal,
Label::Distance if_not_equal_distance = Label::kFar) {
CompareRoot(with, index);
@@ -649,10 +670,6 @@ class MacroAssembler : public TurboAssembler {
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
- void InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag);
-
// ---------------------------------------------------------------------------
// Conversions between tagged smi values and non-tagged integer values.
@@ -668,9 +685,9 @@ class MacroAssembler : public TurboAssembler {
// otherwise use Cmp.
void SmiCompare(Register smi1, Register smi2);
void SmiCompare(Register dst, Smi* src);
- void SmiCompare(Register dst, const Operand& src);
- void SmiCompare(const Operand& dst, Register src);
- void SmiCompare(const Operand& dst, Smi* src);
+ void SmiCompare(Register dst, Operand src);
+ void SmiCompare(Operand dst, Register src);
+ void SmiCompare(Operand dst, Smi* src);
// Functions performing a check on a known or potential smi. Returns
// a condition that is satisfied if the check is successful.
@@ -694,7 +711,7 @@ class MacroAssembler : public TurboAssembler {
// Add an integer constant to a tagged smi, giving a tagged smi as result.
// No overflow testing on the result is done.
- void SmiAddConstant(const Operand& dst, Smi* constant);
+ void SmiAddConstant(Operand dst, Smi* constant);
// Specialized operations
@@ -712,13 +729,13 @@ class MacroAssembler : public TurboAssembler {
// Macro instructions.
// Load/store with specific representation.
- void Load(Register dst, const Operand& src, Representation r);
- void Store(const Operand& dst, Register src, Representation r);
+ void Load(Register dst, Operand src, Representation r);
+ void Store(Operand dst, Register src, Representation r);
void Cmp(Register dst, Handle<Object> source);
- void Cmp(const Operand& dst, Handle<Object> source);
+ void Cmp(Operand dst, Handle<Object> source);
void Cmp(Register dst, Smi* src);
- void Cmp(const Operand& dst, Smi* src);
+ void Cmp(Operand dst, Smi* src);
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the rsp register.
@@ -729,51 +746,11 @@ class MacroAssembler : public TurboAssembler {
void DropUnderReturnAddress(int stack_elements,
Register scratch = kScratchRegister);
- void PushQuad(const Operand& src);
+ void PushQuad(Operand src);
void PushImm32(int32_t imm32);
void Pop(Register dst);
- void Pop(const Operand& dst);
- void PopQuad(const Operand& dst);
-
-#define AVX_OP2_WITH_TYPE(macro_name, name, src_type) \
- void macro_name(XMMRegister dst, src_type src) { \
- if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope scope(this, AVX); \
- v##name(dst, dst, src); \
- } else { \
- name(dst, src); \
- } \
- }
-#define AVX_OP2_X(macro_name, name) \
- AVX_OP2_WITH_TYPE(macro_name, name, XMMRegister)
-#define AVX_OP2_O(macro_name, name) \
- AVX_OP2_WITH_TYPE(macro_name, name, const Operand&)
-#define AVX_OP2_XO(macro_name, name) \
- AVX_OP2_X(macro_name, name) \
- AVX_OP2_O(macro_name, name)
-
- AVX_OP2_XO(Addsd, addsd)
- AVX_OP2_XO(Mulsd, mulsd)
- AVX_OP2_XO(Andps, andps)
- AVX_OP2_XO(Andpd, andpd)
- AVX_OP2_XO(Orpd, orpd)
- AVX_OP2_XO(Cmpeqps, cmpeqps)
- AVX_OP2_XO(Cmpltps, cmpltps)
- AVX_OP2_XO(Cmpleps, cmpleps)
- AVX_OP2_XO(Cmpneqps, cmpneqps)
- AVX_OP2_XO(Cmpnltps, cmpnltps)
- AVX_OP2_XO(Cmpnleps, cmpnleps)
- AVX_OP2_XO(Cmpeqpd, cmpeqpd)
- AVX_OP2_XO(Cmpltpd, cmpltpd)
- AVX_OP2_XO(Cmplepd, cmplepd)
- AVX_OP2_XO(Cmpneqpd, cmpneqpd)
- AVX_OP2_XO(Cmpnltpd, cmpnltpd)
- AVX_OP2_XO(Cmpnlepd, cmpnlepd)
-
-#undef AVX_OP2_O
-#undef AVX_OP2_X
-#undef AVX_OP2_XO
-#undef AVX_OP2_WITH_TYPE
+ void Pop(Operand dst);
+ void PopQuad(Operand dst);
// ---------------------------------------------------------------------------
// SIMD macros.
@@ -785,9 +762,12 @@ class MacroAssembler : public TurboAssembler {
// Control Flow
void Jump(Address destination, RelocInfo::Mode rmode);
void Jump(ExternalReference ext);
- void Jump(const Operand& op);
+ void Jump(Operand op);
void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
+ // Generates a trampoline to jump to the off-heap instruction stream.
+ void JumpToInstructionStream(const InstructionStream* stream);
+
// Non-x64 instructions.
// Push/pop all general purpose registers.
// Does not push rsp/rbp nor any of the assembler's special purpose registers
@@ -806,8 +786,7 @@ class MacroAssembler : public TurboAssembler {
void CmpInstanceType(Register map, InstanceType type);
void DoubleToI(Register result_reg, XMMRegister input_reg,
- XMMRegister scratch, MinusZeroMode minus_zero_mode,
- Label* lost_precision, Label* is_nan, Label* minus_zero,
+ XMMRegister scratch, Label* lost_precision, Label* is_nan,
Label::Distance dst = Label::kFar);
template<typename Field>
@@ -825,7 +804,7 @@ class MacroAssembler : public TurboAssembler {
// Abort execution if argument is not a smi, enabled via --debug-code.
void AssertSmi(Register object);
- void AssertSmi(const Operand& object);
+ void AssertSmi(Operand object);
// Abort execution if argument is not a FixedArray, enabled via --debug-code.
void AssertFixedArray(Register object);
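The AVX_OP/AvxHelper rewrite above replaces the per-instruction wrapper macros with one templated helper that picks the three-operand AVX encoding or the two-operand SSE encoding at emit time. A minimal stand-alone sketch of the same dispatch pattern; the Assembler, the have_avx flag, and the fake instruction bodies below are stand-ins, not V8's API:

#include <cstdio>

struct Assembler {
  bool have_avx = true;  // stand-in for CpuFeatures::IsSupported(AVX)
  void vaddsd(int dst, int dst_dup, int src) {
    std::printf("vaddsd %d, %d, %d\n", dst, dst_dup, src);
  }
  void addsd(int dst, int src) { std::printf("addsd %d, %d\n", dst, src); }
};

template <typename Dst, typename... Args>
struct AvxHelper {
  Assembler* assm;
  // Overload for AVX forms that expect the dst operand duplicated; the real
  // helper adds a second overload for forms that do not (movd, movq, ...).
  template <void (Assembler::*avx)(Dst, Dst, Args...),
            void (Assembler::*no_avx)(Dst, Args...)>
  void emit(Dst dst, Args... args) {
    if (assm->have_avx) {
      (assm->*avx)(dst, dst, args...);
    } else {
      (assm->*no_avx)(dst, args...);
    }
  }
};

#define AVX_OP(macro_name, name)                                             \
  template <typename Dst, typename... Args>                                  \
  void macro_name(Assembler* assm, Dst dst, Args... args) {                  \
    AvxHelper<Dst, Args...>{assm}                                            \
        .template emit<&Assembler::v##name, &Assembler::name>(dst, args...); \
  }

AVX_OP(Addsd, addsd)
#undef AVX_OP

int main() {
  Assembler a;
  Addsd(&a, 0, 1);   // dispatches to vaddsd
  a.have_avx = false;
  Addsd(&a, 0, 1);   // falls back to addsd
  return 0;
}

The member-function-pointer template parameters are what let a single macro cover every operand shape: Dst and Args are deduced at each call site, so the XMMRegister and Operand variants that previously needed separate declarations fall out of one definition.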
diff --git a/deps/v8/src/x64/sse-instr.h b/deps/v8/src/x64/sse-instr.h
index 235aa75fcf..a6614c2346 100644
--- a/deps/v8/src/x64/sse-instr.h
+++ b/deps/v8/src/x64/sse-instr.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SSE_INSTR_H_
-#define V8_SSE_INSTR_H_
+#ifndef V8_X64_SSE_INSTR_H_
+#define V8_X64_SSE_INSTR_H_
#define SSE2_INSTRUCTION_LIST(V) \
V(packsswb, 66, 0F, 63) \
@@ -70,4 +70,4 @@
V(pmulld, 66, 0F, 38, 40) \
V(ptest, 66, 0F, 38, 17)
-#endif // V8_SSE_INSTR_H_
+#endif // V8_X64_SSE_INSTR_H_
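SSE2_INSTRUCTION_LIST and the related lists in this header are X-macro tables: every V(...) entry is re-expanded at each consumer. A small illustrative sketch of how such a list is typically consumed, using a hypothetical name-only list (the real entries also carry encoding bytes such as 66, 0F, 63):

#include <cstdio>

#define DEMO_SSE2_INSTRUCTION_LIST(V) \
  V(packsswb)                         \
  V(paddd)                            \
  V(pmulld)

// First expansion: an enumerator per instruction.
enum class SseOp {
#define DECLARE_OP(name) k_##name,
  DEMO_SSE2_INSTRUCTION_LIST(DECLARE_OP)
#undef DECLARE_OP
};

// Second expansion: a name lookup over the same list.
const char* SseOpName(SseOp op) {
  switch (op) {
#define OP_CASE(name)   \
  case SseOp::k_##name: \
    return #name;
    DEMO_SSE2_INSTRUCTION_LIST(OP_CASE)
#undef OP_CASE
  }
  return "unknown";
}

int main() {
  std::printf("%s\n", SseOpName(SseOp::k_paddd));  // prints "paddd"
  return 0;
}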
diff --git a/deps/v8/src/zone/accounting-allocator.h b/deps/v8/src/zone/accounting-allocator.h
index 53d30b3826..bf36a7ff95 100644
--- a/deps/v8/src/zone/accounting-allocator.h
+++ b/deps/v8/src/zone/accounting-allocator.h
@@ -6,6 +6,7 @@
#define V8_ZONE_ACCOUNTING_ALLOCATOR_H_
#include "include/v8-platform.h"
+#include "include/v8.h"
#include "src/base/atomic-utils.h"
#include "src/base/atomicops.h"
#include "src/base/macros.h"
diff --git a/deps/v8/src/zone/zone-chunk-list.h b/deps/v8/src/zone/zone-chunk-list.h
index a0aaca8b09..229a3f3f40 100644
--- a/deps/v8/src/zone/zone-chunk-list.h
+++ b/deps/v8/src/zone/zone-chunk-list.h
@@ -8,18 +8,14 @@
#include "src/utils.h"
#include "src/zone/zone.h"
-#ifndef V8_SRC_ZONE_ZONE_CHUNK_LIST_H_
-#define V8_SRC_ZONE_ZONE_CHUNK_LIST_H_
+#ifndef V8_ZONE_ZONE_CHUNK_LIST_H_
+#define V8_ZONE_ZONE_CHUNK_LIST_H_
namespace v8 {
namespace internal {
-template <typename T>
+template <typename T, bool backwards, bool modifiable>
class ZoneChunkListIterator;
-template <typename T>
-class ForwardZoneChunkListIterator;
-template <typename T>
-class ReverseZoneChunkListIterator;
// A zone-backed hybrid of a vector and a linked list. Use it if you need a
// collection that
@@ -38,6 +34,11 @@ class ReverseZoneChunkListIterator;
template <typename T>
class ZoneChunkList : public ZoneObject {
public:
+ using iterator = ZoneChunkListIterator<T, false, true>;
+ using const_iterator = ZoneChunkListIterator<T, false, false>;
+ using reverse_iterator = ZoneChunkListIterator<T, true, true>;
+ using const_reverse_iterator = ZoneChunkListIterator<T, true, false>;
+
enum class StartMode {
// The list will not allocate a starting chunk. Use if you expect your
// list to remain empty in many cases.
@@ -58,7 +59,7 @@ class ZoneChunkList : public ZoneObject {
}
}
- size_t size() const;
+ size_t size() const { return size_; }
T& front() const;
T& back() const;
@@ -78,27 +79,31 @@ class ZoneChunkList : public ZoneObject {
// Quickly scans the list to retrieve the element at the given index. Will
// *not* check bounds.
- ForwardZoneChunkListIterator<T> Find(const size_t index);
- ForwardZoneChunkListIterator<const T> Find(const size_t index) const;
+ iterator Find(const size_t index);
+ const_iterator Find(const size_t index) const;
// TODO(heimbuef): Add 'rFind', seeking from the end and returning a
// reverse iterator.
void CopyTo(T* ptr);
- ForwardZoneChunkListIterator<T> begin();
- ForwardZoneChunkListIterator<T> end();
- ReverseZoneChunkListIterator<T> rbegin();
- ReverseZoneChunkListIterator<T> rend();
- ForwardZoneChunkListIterator<const T> begin() const;
- ForwardZoneChunkListIterator<const T> end() const;
- ReverseZoneChunkListIterator<const T> rbegin() const;
- ReverseZoneChunkListIterator<const T> rend() const;
+ iterator begin() { return iterator::Begin(this); }
+ iterator end() { return iterator::End(this); }
+ reverse_iterator rbegin() { return reverse_iterator::Begin(this); }
+ reverse_iterator rend() { return reverse_iterator::End(this); }
+ const_iterator begin() const { return const_iterator::Begin(this); }
+ const_iterator end() const { return const_iterator::End(this); }
+ const_reverse_iterator rbegin() const {
+ return const_reverse_iterator::Begin(this);
+ }
+ const_reverse_iterator rend() const {
+ return const_reverse_iterator::End(this);
+ }
private:
- friend class ZoneChunkListIterator<T>;
- friend class ForwardZoneChunkListIterator<T>;
- friend class ReverseZoneChunkListIterator<T>;
- static const uint32_t kMaxChunkCapacity = 256u;
+ template <typename S, bool backwards, bool modifiable>
+ friend class ZoneChunkListIterator;
+
+ static constexpr uint32_t kMaxChunkCapacity = 256u;
STATIC_ASSERT(kMaxChunkCapacity == static_cast<uint32_t>(StartMode::kBig));
@@ -108,6 +113,7 @@ class ZoneChunkList : public ZoneObject {
Chunk* next_ = nullptr;
Chunk* previous_ = nullptr;
T* items() { return reinterpret_cast<T*>(this + 1); }
+ const T* items() const { return reinterpret_cast<const T*>(this + 1); }
};
Chunk* NewChunk(const uint32_t capacity) {
@@ -135,152 +141,108 @@ class ZoneChunkList : public ZoneObject {
DISALLOW_COPY_AND_ASSIGN(ZoneChunkList);
};
-template <typename T>
+template <typename T, bool backwards, bool modifiable>
class ZoneChunkListIterator {
+ private:
+ template <typename S>
+ using maybe_const =
+ typename std::conditional<modifiable, S,
+ typename std::add_const<S>::type>::type;
+ using Chunk = maybe_const<typename ZoneChunkList<T>::Chunk>;
+ using ChunkList = maybe_const<ZoneChunkList<T>>;
+
public:
- T& operator*() { return current_->items()[position_]; }
- bool operator==(const ZoneChunkListIterator& other) {
+ maybe_const<T>& operator*() { return current_->items()[position_]; }
+ bool operator==(const ZoneChunkListIterator& other) const {
return other.current_ == current_ && other.position_ == position_;
}
- bool operator!=(const ZoneChunkListIterator& other) {
+ bool operator!=(const ZoneChunkListIterator& other) const {
return !operator==(other);
}
- protected:
- ZoneChunkListIterator(typename ZoneChunkList<T>::Chunk* current,
- size_t position)
- : current_(current), position_(position) {}
-
- void MoveNext() {
- ++position_;
- if (position_ >= current_->capacity_) {
- current_ = current_->next_;
- position_ = 0;
- }
- }
-
- void MoveRNext() {
- if (position_ == 0) {
- current_ = current_->previous_;
- position_ = current_ ? current_->capacity_ - 1 : 0;
- } else {
- --position_;
- }
- }
-
- typename ZoneChunkList<T>::Chunk* current_;
- size_t position_;
-};
-
-template <typename T>
-class ForwardZoneChunkListIterator : public ZoneChunkListIterator<T> {
- using ZoneChunkListIterator<T>::current_;
- using ZoneChunkListIterator<T>::position_;
- using ZoneChunkListIterator<T>::MoveNext;
- using ZoneChunkListIterator<T>::MoveRNext;
-
- public:
- ForwardZoneChunkListIterator(typename ZoneChunkList<T>::Chunk* current,
- size_t position)
- : ZoneChunkListIterator<T>(current, position) {}
-
- ForwardZoneChunkListIterator& operator++() {
- MoveNext();
+ ZoneChunkListIterator& operator++() {
+ Move<backwards>();
return *this;
}
- ForwardZoneChunkListIterator operator++(int) {
- ForwardZoneChunkListIterator<T> clone(*this);
- MoveNext();
+ ZoneChunkListIterator operator++(int) {
+ ZoneChunkListIterator clone(*this);
+ Move<backwards>();
return clone;
}
- ForwardZoneChunkListIterator& operator--() {
- MoveRNext();
+ ZoneChunkListIterator& operator--() {
+ Move<!backwards>();
return *this;
}
- ForwardZoneChunkListIterator operator--(int) {
- ForwardZoneChunkListIterator<T> clone(*this);
- MoveRNext();
+ ZoneChunkListIterator operator--(int) {
+ ZoneChunkListIterator clone(*this);
+ Move<!backwards>();
return clone;
}
private:
friend class ZoneChunkList<T>;
- static ForwardZoneChunkListIterator<T> Begin(ZoneChunkList<T>* list) {
- return ForwardZoneChunkListIterator<T>(list->front_, 0);
- }
- static ForwardZoneChunkListIterator<T> End(ZoneChunkList<T>* list) {
- if (list->back_ == nullptr) return Begin(list);
- DCHECK_LE(list->back_->position_, list->back_->capacity_);
- if (list->back_->position_ == list->back_->capacity_) {
- return ForwardZoneChunkListIterator<T>(nullptr, 0);
- }
+ static ZoneChunkListIterator Begin(ChunkList* list) {
+ // Forward iterator:
+ if (!backwards) return ZoneChunkListIterator(list->front_, 0);
- return ForwardZoneChunkListIterator<T>(list->back_, list->back_->position_);
+ // Backward iterator:
+ if (list->back_ == nullptr) return End(list);
+ if (list->back_->position_ == 0) {
+ if (list->back_->previous_ != nullptr) {
+ return ZoneChunkListIterator(list->back_->previous_,
+ list->back_->previous_->capacity_ - 1);
+ } else {
+ return End(list);
+ }
+ }
+ return ZoneChunkListIterator(list->back_, list->back_->position_ - 1);
}
-};
-template <typename T>
-class ReverseZoneChunkListIterator : public ZoneChunkListIterator<T> {
- using ZoneChunkListIterator<T>::current_;
- using ZoneChunkListIterator<T>::position_;
- using ZoneChunkListIterator<T>::MoveNext;
- using ZoneChunkListIterator<T>::MoveRNext;
+ static ZoneChunkListIterator End(ChunkList* list) {
+ // Backward iterator:
+ if (backwards) return ZoneChunkListIterator(nullptr, 0);
- public:
- ReverseZoneChunkListIterator(typename ZoneChunkList<T>::Chunk* current,
- size_t position)
- : ZoneChunkListIterator<T>(current, position) {}
-
- ReverseZoneChunkListIterator& operator++() {
- MoveRNext();
- return *this;
- }
+ // Forward iterator:
+ if (list->back_ == nullptr) return Begin(list);
- ReverseZoneChunkListIterator operator++(int) {
- ReverseZoneChunkListIterator<T> clone(*this);
- MoveRNext();
- return clone;
- }
+ DCHECK_LE(list->back_->position_, list->back_->capacity_);
+ if (list->back_->position_ == list->back_->capacity_) {
+ return ZoneChunkListIterator(list->back_->next_, 0);
+ }
- ReverseZoneChunkListIterator& operator--() {
- MoveNext();
- return *this;
+ return ZoneChunkListIterator(list->back_, list->back_->position_);
}
- ReverseZoneChunkListIterator operator--(int) {
- ForwardZoneChunkListIterator<T> clone(*this);
- MoveNext();
- return clone;
- }
+ ZoneChunkListIterator(Chunk* current, size_t position)
+ : current_(current), position_(position) {}
- private:
- friend class ZoneChunkList<T>;
- static ReverseZoneChunkListIterator<T> Begin(ZoneChunkList<T>* list) {
- if (list->back_ == nullptr) return End(list);
- if (list->back_->position_ == 0) {
- if (list->back_->previous_ != nullptr) {
- return ReverseZoneChunkListIterator<T>(
- list->back_->previous_, list->back_->previous_->capacity_ - 1);
+ template <bool move_backward>
+ void Move() {
+ if (move_backward) {
+ // Move backwards.
+ if (position_ == 0) {
+ current_ = current_->previous_;
+ position_ = current_ ? current_->capacity_ - 1 : 0;
} else {
- return End(list);
+ --position_;
+ }
+ } else {
+ // Move forwards.
+ ++position_;
+ if (position_ >= current_->capacity_) {
+ current_ = current_->next_;
+ position_ = 0;
}
}
- return ReverseZoneChunkListIterator<T>(list->back_,
- list->back_->position_ - 1);
- }
- static ReverseZoneChunkListIterator<T> End(ZoneChunkList<T>* list) {
- return ReverseZoneChunkListIterator<T>(nullptr, 0);
}
-};
-template <typename T>
-size_t ZoneChunkList<T>::size() const {
- return size_;
-}
+ Chunk* current_;
+ size_t position_;
+};
template <typename T>
T& ZoneChunkList<T>::front() const {
@@ -327,6 +289,7 @@ void ZoneChunkList<T>::pop_back() {
back_ = back_->previous_;
}
--back_->position_;
+ --size_;
}
template <typename T>
@@ -380,18 +343,18 @@ void ZoneChunkList<T>::Rewind(const size_t limit) {
}
template <typename T>
-ForwardZoneChunkListIterator<T> ZoneChunkList<T>::Find(const size_t index) {
+typename ZoneChunkList<T>::iterator ZoneChunkList<T>::Find(const size_t index) {
SeekResult seek_result = SeekIndex(index);
- return ForwardZoneChunkListIterator<T>(seek_result.chunk_,
- seek_result.chunk_index_);
+ return typename ZoneChunkList<T>::iterator(seek_result.chunk_,
+ seek_result.chunk_index_);
}
template <typename T>
-ForwardZoneChunkListIterator<const T> ZoneChunkList<T>::Find(
+typename ZoneChunkList<T>::const_iterator ZoneChunkList<T>::Find(
const size_t index) const {
SeekResult seek_result = SeekIndex(index);
- return ForwardZoneChunkListIterator<const T>(seek_result.chunk_,
- seek_result.chunk_index_);
+ return typename ZoneChunkList<T>::const_iterator(seek_result.chunk_,
+ seek_result.chunk_index_);
}
template <typename T>
@@ -407,47 +370,7 @@ void ZoneChunkList<T>::CopyTo(T* ptr) {
}
}
-template <typename T>
-ForwardZoneChunkListIterator<T> ZoneChunkList<T>::begin() {
- return ForwardZoneChunkListIterator<T>::Begin(this);
-}
-
-template <typename T>
-ForwardZoneChunkListIterator<T> ZoneChunkList<T>::end() {
- return ForwardZoneChunkListIterator<T>::End(this);
-}
-
-template <typename T>
-ReverseZoneChunkListIterator<T> ZoneChunkList<T>::rbegin() {
- return ReverseZoneChunkListIterator<T>::Begin(this);
-}
-
-template <typename T>
-ReverseZoneChunkListIterator<T> ZoneChunkList<T>::rend() {
- return ReverseZoneChunkListIterator<T>::End(this);
-}
-
-template <typename T>
-ForwardZoneChunkListIterator<const T> ZoneChunkList<T>::begin() const {
- return ForwardZoneChunkListIterator<const T>::Begin(this);
-}
-
-template <typename T>
-ForwardZoneChunkListIterator<const T> ZoneChunkList<T>::end() const {
- return ForwardZoneChunkListIterator<const T>::End(this);
-}
-
-template <typename T>
-ReverseZoneChunkListIterator<const T> ZoneChunkList<T>::rbegin() const {
- return ReverseZoneChunkListIterator<const T>::Begin(this);
-}
-
-template <typename T>
-ReverseZoneChunkListIterator<const T> ZoneChunkList<T>::rend() const {
- return ReverseZoneChunkListIterator<const T>::End(this);
-}
-
} // namespace internal
} // namespace v8
-#endif // V8_SRC_ZONE_ZONE_CHUNK_LIST_H_
+#endif // V8_ZONE_ZONE_CHUNK_LIST_H_
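The zone-chunk-list rewrite above folds the four hand-written iterator classes into one ZoneChunkListIterator<T, backwards, modifiable>: std::conditional picks const or mutable element access, and a single templated Move() serves ++ and -- in both directions. A minimal sketch of that pattern over a plain std::vector; the class and member names are illustrative, not V8's:

#include <cstddef>
#include <cstdio>
#include <type_traits>
#include <vector>

template <typename T, bool backwards, bool modifiable>
class SimpleIterator {
  // maybe_const<S> is S for modifiable iterators and const S otherwise.
  template <typename S>
  using maybe_const =
      typename std::conditional<modifiable, S,
                                typename std::add_const<S>::type>::type;

 public:
  SimpleIterator(maybe_const<std::vector<T>>* vec, std::size_t pos)
      : vec_(vec), pos_(pos) {}

  maybe_const<T>& operator*() { return (*vec_)[pos_]; }

  SimpleIterator& operator++() {
    Move<backwards>();
    return *this;
  }
  SimpleIterator& operator--() {
    Move<!backwards>();
    return *this;
  }

 private:
  // One Move() covers both directions; the direction is a template argument,
  // so the branch folds away at compile time.
  template <bool move_backward>
  void Move() {
    if (move_backward) {
      --pos_;
    } else {
      ++pos_;
    }
  }

  maybe_const<std::vector<T>>* vec_;
  std::size_t pos_;
};

int main() {
  std::vector<int> data{1, 2, 3};
  SimpleIterator<int, /*backwards=*/false, /*modifiable=*/true> it(&data, 0);
  *it = 7;  // mutable access through a modifiable iterator
  ++it;
  std::printf("%d\n", *it);  // prints 2
  return 0;
}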
diff --git a/deps/v8/src/zone/zone-containers.h b/deps/v8/src/zone/zone-containers.h
index 5e9fd0440a..c899bf340d 100644
--- a/deps/v8/src/zone/zone-containers.h
+++ b/deps/v8/src/zone/zone-containers.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SRC_ZONE_ZONE_CONTAINERS_H_
-#define V8_SRC_ZONE_ZONE_CONTAINERS_H_
+#ifndef V8_ZONE_ZONE_CONTAINERS_H_
+#define V8_ZONE_ZONE_CONTAINERS_H_
#include <deque>
#include <forward_list>
@@ -190,4 +190,4 @@ typedef ZoneVector<int> IntVector;
} // namespace internal
} // namespace v8
-#endif // V8_SRC_ZONE_ZONE_CONTAINERS_H_
+#endif // V8_ZONE_ZONE_CONTAINERS_H_
diff --git a/deps/v8/src/zone/zone-handle-set.h b/deps/v8/src/zone/zone-handle-set.h
index 9abc89a30e..c5297902d8 100644
--- a/deps/v8/src/zone/zone-handle-set.h
+++ b/deps/v8/src/zone/zone-handle-set.h
@@ -195,11 +195,13 @@ class ZoneHandleSet<T>::const_iterator {
typedef std::forward_iterator_tag iterator_category;
typedef std::ptrdiff_t difference_type;
typedef Handle<T> value_type;
+ typedef value_type reference;
+ typedef value_type* pointer;
const_iterator(const const_iterator& other)
: set_(other.set_), current_(other.current_) {}
- Handle<T> operator*() const { return (*set_)[current_]; }
+ reference operator*() const { return (*set_)[current_]; }
bool operator==(const const_iterator& other) const {
return set_ == other.set_ && current_ == other.current_;
}